from typing import Optional

from datasets import load_dataset, Dataset

def extract_hash_answer(text: str) -> Optional[str]:
    """Extract the final answer following the GSM8K "####" delimiter.

    Args:
        text: A GSM8K-style answer string, e.g. "reasoning steps #### 42".

    Returns:
        The whitespace-stripped text between the first "####" marker and the
        next one (or end of string), or None when no marker is present.
    """
    if "####" not in text:
        return None
    # split(...)[1] keeps only the segment after the FIRST marker.
    return text.split("####")[1].strip()

def get_gsm8k_question(split: str = "train", has_prompt_format: bool = False) -> Dataset:
    """Load a GSM8K split and attach a Josie-persona prompt to each example.

    Args:
        split: Dataset split to load (e.g. "train" or "test").
        has_prompt_format: When True, 'prompt' is a chat-style list of
            role/content message dicts; when False, it is a single flat
            string ending in "Assistant: " for plain completion models.

    Returns:
        The mapped Dataset with a 'prompt' column (format per
        ``has_prompt_format``) and an 'answer' column holding the text after
        the GSM8K "####" marker (None when the marker is absent).
    """
    # Defined once so the chat and flat prompt formats cannot drift apart.
    system_prompt = """A conversation between User (name: Gökdeniz) and Assistant (name: Josie). Gökdeniz asks a question, and Josie solves it.
Josie first thinks about the reasoning process in the mind and then provides Gökdeniz with the answer. The reasoning process and answer are enclosed within <josie_thinks> </josie_thinks> and <josie_answers> </josie_answers> tags, respectively, i.e., <josie_thinks> reasoning process here </josie_thinks>
<josie_answers> answer here </josie_answers>."""

    data = load_dataset("openai/gsm8k", "main")[split]

    if has_prompt_format:
        # Chat format: system instruction + user question as message dicts.
        data = data.map(lambda x: {
            'prompt': [
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': x['question']}
            ],
            'answer': extract_hash_answer(x['answer'])
        })
    else:
        # Flat format: instruction and question concatenated into one string.
        data = data.map(lambda x: {
            'prompt': f"{system_prompt} User: {x['question']}. Assistant: ",
            'answer': extract_hash_answer(x['answer'])
        })

    return data