import re
import os
from datasets import Dataset, load_dataset
import random
from typing import List, Tuple
from tqdm import tqdm
from verl.utils.hdfs_io import copy, makedirs
import argparse
import pandas as pd
import json

import numpy as np
import math

def get_stage2_guidance(current_step, total_training_steps):
    """Cosine-annealed sampling probabilities for stage-2 prompt guidance.

    At step 0 the "simple" (guided) prompt has probability 1.0; the
    probability decays along a half-cosine to 0.0 at the final step, with
    the "none" (unguided) prompt taking up the remainder.

    Args:
        current_step: Current training step (row index), expected >= 0.
        total_training_steps: Total number of steps; must be non-zero.

    Returns:
        dict with keys "simple" and "none"; the two values sum to 1.0 and
        each lies in [0, 1].
    """
    # Normalized progress, clamped to [0, 1]: without the clamp, a step
    # beyond the planned horizon pushes cos() past pi and produces a
    # negative probability, which np.random.choice(p=...) rejects.
    progress = min(max(current_step / total_training_steps, 0.0), 1.0)

    p_simple = 0.5 * (1.0 + math.cos(progress * math.pi))

    return {"simple": p_simple, "none": 1.0 - p_simple}

def sort_win(datasets):
    """Return the given dataset ordered by its ``win_num`` column, descending."""
    return datasets.sort("win_num", reverse=True)

# Full Analysis of Competing Hypotheses (ACH) worksheet, spelled out step by
# step.  Interpolated into user_prompt_template_1 via its {ach_template} slot.
# NOTE(review): user_prompt_template_1 is not referenced by the map functions
# below, so this template is currently unused — confirm it is still needed.
ach_template = """Analyze according to the Analysis of Competing Hypotheses(ACH) provided below:
**Step 1: Propose Hypotheses**

  - Restate the user's core question.
  - Treat each agent's primary conclusion as a distinct, competing hypothesis. List all of them exhaustively.
      - `H1 (from Agent 1): [Conclusion of Agent 1]`
      - `H2 (from Agent 2): [Conclusion of Agent 2]`
      - `...`

**Step 2: List Evidence**

  - Systematically extract all discrete facts, data points, and key arguments from the agent responses. This is your "Evidence Log".
  - Tag each piece of evidence with its source.
      - `E1 (from Agent 1): [Fact/Argument 1]`
      - `E2 (from Agent 2): [Fact/Argument 2]`
      - `...`

**Step 3: Construct Hypothesis-Evidence Matrix**

  - Create a matrix with Hypotheses (H1, H2, ...) as columns and Evidence (E1, E2, ...) as rows.
  - Evaluate the relationship between each piece of evidence and each hypothesis, marking the cell as:
      - `(+)` for **Consistent**.
      - `(-)` for **Inconsistent**.
      - `(0)` for **Irrelevant**.

**Step 4: Refine the Matrix**

  - Critically re-examine the matrix you just created. Look for logical loopholes or questionable ratings.
  - Ask yourself: "Are there any ratings that seem contradictory? For example, if E1 and E2 are similar, why do they have different ratings against H3?"
  - Document any corrections made to the matrix and provide a brief justification for each change. If no changes are needed, state "Matrix deemed consistent and final."

**Step 5: Draw Preliminary Conclusion**

  - Based on the **refined matrix**, identify the hypothesis with the **fewest inconsistency markers (`-`)**. This is your preliminary conclusion.
  - State clearly which hypothesis has been selected as the current front-runner.

**Step 6: Challenge the Conclusion (Devil's Advocate)**

  - Now, actively challenge your preliminary conclusion. Assume for a moment that it is wrong.
  - Present the strongest possible counter-argument. Which pieces of evidence (especially those marked `-` for your chosen hypothesis) pose the most significant challenge to its validity?
  - This step is a stress test to ensure your conclusion is robust.

**Step 7: State Final Analytic Report**

  - Synthesize the findings from all previous steps, especially the matrix analysis and the Devil's Advocate challenge.
  - Your final report must contain two parts:
      - **A. Internal Decision Justification:** A comprehensive explanation of the reasoning process. State the final decision, explain why it was chosen over others (referencing the inconsistency scores), and discuss how the Devil's Advocate challenge was resolved.
      - **B. Final Answer for the User:** The polished, clear, and direct answer for the end-user, presented helpfully and without the internal analytical jargon.
"""


# System message establishing the "Senior Decider Agent" persona.  Shared by
# both the train- and test-side prompt builders below.
system_prompt_template = """You are a Senior Decider Agent, operating as the final analytical authority within a multi-agent system. You will be given a user question and the responses from three agents. Your mission is to critically evaluate the potentially conflicting and incomplete responses from several subordinate agents, in order to produce a single, unified, and highly reliable final answer for the user. Your primary directive is to favor logic and evidence over consensus or confidence, and reason step by step."""

# Variant 1 ("full" guidance): embeds the complete ACH worksheet through the
# {ach_template} slot.
# NOTE(review): not used by train_make_map_fn / test_make_map_fn in this file
# (the stage-2 schedule only samples templates 2 and 3) — confirm whether it
# should be removed or wired in.
user_prompt_template_1 = """
Question: {question}

The following are the responses from the agents:
Agent 1: {answer_1}
Agent 2: {answer_2}
Agent 3: {answer_3}

{ach_template}

Please conduct an analysis based on the Analysis of Competing Hypotheses(ACH). Your analysis and reasoning should be included within the <think> and </think> tags. Finally, provide your final answer within the <answer> and </answer> tags. Finish your answer with \"the answer is (X)\" where X is the correct letter choice.
Let's think step by step.
<think>
"""

# Variant 2 ("simple" guidance): asks for an ACH-based analysis by name but
# omits the detailed worksheet.  Sampled with probability p_simple (high for
# early training rows) in train_make_map_fn.
user_prompt_template_2 = """
Question: {question}

The following are the responses from the agents:
Agent 1: {answer_1}
Agent 2: {answer_2}
Agent 3: {answer_3}


Please conduct an analysis based on the Analysis of Competing Hypotheses(ACH). Your analysis and reasoning should be included within the <think> and </think> tags. Finally, provide your final answer within the <answer> and </answer> tags. Finish your answer with \"the answer is (X)\" where X is the correct letter choice.
Let's think step by step.
<think>
"""

# Variant 3 ("none" guidance): no ACH mention at all.  Sampled for late
# training rows in train_make_map_fn, and used for every test row in
# test_make_map_fn.
user_prompt_template_3 = """
Question: {question}

The following are the responses from the agents:
Agent 1: {answer_1}
Agent 2: {answer_2}
Agent 3: {answer_3}


Your analysis and reasoning should be included within the <think> and </think> tags. Finally, provide your final answer within the <answer> and </answer> tags. Finish your answer with \"the answer is (X)\" where X is the correct letter choice.
Let's think step by step.
<think>
"""

# assistant_prompt_template = """
# Please act as a Decision - Maker and solve this step by step.
# <think>"""

# Tag written into every example's "data_source" field.
data_source = 'agent_cdm'

# train_data_root_path = "/llm/nankai/xuyang_space/data/AM-DeepSeek-Distilled-40M/agent_cdm/raw/data20250620/rl/raw/train.jsonl"

# Hard-coded cluster paths for the raw JSONL dumps and the processed output.
# NOTE(review): argparse is imported but unused — these would be natural CLI
# arguments.
train_data_root_path = "/llm/nankai/xuyang_space/data/AM-DeepSeek-Distilled-40M/agent_cdm/raw/data20250717/train/all.jsonl"

test_data_root_path = "/llm/nankai/xuyang_space/data/AM-DeepSeek-Distilled-40M/agent_cdm/raw/data20250717/test/all.jsonl"

save_dir = "/llm/nankai/xuyang_space/data/AM-DeepSeek-Distilled-40M/agent_cdm/processed/data20250717_step2"

# load_dataset('json', ...) exposes the loaded file under the "train" split
# regardless of which file it came from, hence both indexings below.
train_dataset = load_dataset('json', data_files=train_data_root_path)
test_dataset = load_dataset('json', data_files=test_data_root_path)

train_dataset = train_dataset["train"]
test_dataset = test_dataset["train"]

# Total number of training rows; drives the cosine prompt schedule in
# train_make_map_fn (row index / train_len = training progress).
train_len = len(train_dataset)
# train_dataset = sort_win(train_dataset)

def train_make_map_fn(split):
    """Build a ``datasets.map(..., with_indices=True)`` callback for training.

    Each raw record holds three model responses to the same question.  The
    callback renders them into a chat-formatted RL example; the user-prompt
    template is sampled per row from the cosine schedule in
    ``get_stage2_guidance``: early rows mostly get the "simple" (ACH-named)
    template, late rows the "none" (unguided) template.

    Args:
        split: Split tag written into ``extra_info['split']``.

    Returns:
        A ``process_fn(example, idx) -> dict`` suitable for ``Dataset.map``.

    NOTE(review): sampling uses the global NumPy RNG, so the output is not
    reproducible unless the caller seeds ``np.random`` first.
    """
    def process_fn(example, idx):
        # Some source files nest the record under a "data" key.
        if "data" in example:
            example = example["data"]
        models = ["glm-4-9b-chat", "Mistral-7B-Instruct-v0.3", "Meta-Llama-3-8B-Instruct"]
        question = example[models[0]]["question"]
        solution = example[models[0]]["raw_input"]["target"]

        # Drop each model's chain-of-thought: keep only the text after the
        # last </think> tag.
        agent1, agent2, agent3 = (
            example[m]["answer"].split("</think>")[-1].strip() for m in models
        )

        # `train_len` is a module-level value that is only read here, so no
        # `global` declaration is needed (the original one was a no-op).
        probabilities = get_stage2_guidance(idx, train_len)
        prompt_types = ["simple", "none"]
        prompt_probs = [probabilities["simple"], probabilities["none"]]
        # Sample a prompt type according to the schedule.
        chosen_prompt_type = np.random.choice(prompt_types, p=prompt_probs)

        # "simple" keeps a brief ACH instruction; "none" drops it entirely.
        # Both templates take the same fields, so format once.
        template = (
            user_prompt_template_2 if chosen_prompt_type == "simple"
            else user_prompt_template_3
        )
        question_content = template.format(
            question=question,
            answer_1=agent1,
            answer_2=agent2,
            answer_3=agent3,
        )

        return {
            "data_source": data_source,
            "prompt": [
                {
                    "role": "system",
                    "content": system_prompt_template,
                },
                {
                    "role": "user",
                    "content": question_content,
                }
            ],
            "ability": "math",
            "reward_model": {
                "style": "rule",
                "ground_truth": solution
            },
            "extra_info": {
                'split': split,
                'index': idx,
                "template_type": chosen_prompt_type,
                "dataset_length": train_len,
            }
        }
    return process_fn

def test_make_map_fn(split):
    """Build a ``datasets.map(..., with_indices=True)`` callback for evaluation.

    Every test row is rendered with the plain (no-ACH) user template, so the
    evaluation prompts are fixed and comparable across runs.

    Args:
        split: Split tag written into ``extra_info['split']``.

    Returns:
        A ``process_fn(example, idx) -> dict`` suitable for ``Dataset.map``.
    """
    def process_fn(example, idx):
        # Unwrap records that nest their payload under a "data" key.
        if "data" in example:
            example = example["data"]

        source_models = [
            "glm-4-9b-chat",
            "Mistral-7B-Instruct-v0.3",
            "Meta-Llama-3-8B-Instruct",
        ]
        primary = example[source_models[0]]
        question = primary["question"]
        solution = primary["raw_input"]["target"]

        # Strip each agent's chain-of-thought, keeping only the text after
        # the final </think> tag.
        final_answers = [
            example[name]["answer"].split("</think>")[-1].strip()
            for name in source_models
        ]

        question_content = user_prompt_template_3.format(
            question=question,
            answer_1=final_answers[0],
            answer_2=final_answers[1],
            answer_3=final_answers[2],
        )

        return {
            "data_source": data_source,
            "prompt": [
                {"role": "system", "content": system_prompt_template},
                {"role": "user", "content": question_content},
            ],
            "ability": "math",
            "reward_model": {"style": "rule", "ground_truth": solution},
            "extra_info": {"split": split, "index": idx},
        }

    return process_fn

    
# train_dataset = train_dataset.filter(should_keep, with_indices=True)


# train_dataset = train_dataset.map(function=train_make_map_fn('train'), with_indices=True)
train_dataset = train_dataset.map(function=train_make_map_fn('train'), with_indices=True)
test_dataset = test_dataset.map(function=test_make_map_fn('test'), with_indices=True)


train_dataset.to_parquet(os.path.join(save_dir, 'train.parquet'))
test_dataset.to_parquet(os.path.join(save_dir, 'test.parquet'))