import os
import random

import torch
from datasets import load_dataset as hf_load_dataset, concatenate_datasets
from transformers import TrainerCallback, TrainingArguments, TrainerState, TrainerControl, StoppingCriteria, \
    StoppingCriteriaList


def format_prompt(example, tokenizer) -> str:
    """Convert one dataset example into a chat-template-formatted training string.

    Supports four example schemas, checked in this order:
      1. alpaca-style: ``instruction`` / optional ``input`` / ``output``
      2. ``messages``: a ready-made chat message list
      3. sharegpt-style: ``conversations`` (+ optional ``system``)
      4. raw ``text``: returned as-is, bypassing the chat template

    Args:
        example: one dataset record (dict-like).
        tokenizer: tokenizer exposing ``apply_chat_template``.

    Returns:
        The formatted (untokenized) prompt string.

    Raises:
        ValueError: if the example matches none of the known schemas.
    """
    if 'instruction' in example and 'output' in example:
        messages = []
        if "<think>" in example["output"]:
            # Pseudo-random (parity of output length) pick between a Chinese
            # and an English reasoning system prompt.
            if len(example["output"]) % 2 == 0:
                system_prompt = "深入思考並用推理回答我的問題."
            else:
                system_prompt = "Think deeply and answer my questions with reasoning."
            messages.append({"role": "system", "content": system_prompt})

        # Bug fix: 'input' may be absent from alpaca-style records; treat a
        # missing key like an empty string instead of raising KeyError.
        input_text = example.get('input', '')
        if len(input_text) > 0:
            # Parity-based augmentation: roughly half the time merge
            # instruction+input into one user turn; otherwise use the
            # instruction as a system turn. Always merge when a reasoning
            # system prompt was already added (only one system message).
            if len(input_text) % 2 == 0 or len(messages) == 1:
                messages.append({"role": "user", "content": example["instruction"] + "\n\n" + input_text})
            else:
                messages.append({"role": "system", "content": example['instruction']})
                messages.append({"role": "user", "content": input_text})
        else:
            messages.append({"role": "user", "content": example["instruction"]})

        messages.append({"role": "assistant", "content": example["output"]})
    elif 'messages' in example:
        messages = example['messages']
    elif 'conversations' in example:
        messages = []
        if 'system' in example and len(example['system']) > 0:
            messages.append({"role": 'system', "content": example['system']})

        for conv in example['conversations']:
            # sharegpt convention: 'from' == 'human' → user, else assistant.
            role = 'user' if conv['from'] == 'human' else 'assistant'
            messages.append({"role": role, "content": conv['value']})
    elif 'text' in example:
        return example['text']
    else:
        raise ValueError("Unknown example format")

    return tokenizer.apply_chat_template(messages, tokenize=False)


def load_single_dataset(dataset_name: str, split: str = None):
    """Load one dataset from a local file, a local directory, or the HF hub.

    Args:
        dataset_name: path to a ``.json``/``.jsonl``/``.csv`` file, a local
            dataset directory, or a hub repo id.
        split: optional split name passed through to ``load_dataset``.

    Returns:
        The loaded ``datasets`` dataset (or split).
    """
    lower = dataset_name.lower()
    if lower.endswith(('.json', '.jsonl')):
        return hf_load_dataset('json', data_files=dataset_name, split=split)
    if lower.endswith('.csv'):
        return hf_load_dataset('csv', data_files=dataset_name, split=split)
    # Local directories and hub repo ids are loaded identically, so the old
    # os.path.isdir branch (whose body duplicated the else branch) is gone;
    # hf_load_dataset resolves which kind of source this is.
    return hf_load_dataset(dataset_name, split=split)


def load_multi_datasets(dataset_name: str, tokenizer, num_proc: int, max_seq_length: int, sampling: float):
    """Load, pre-tokenize, length-filter, optionally subsample, and merge datasets.

    Args:
        dataset_name: one dataset path/name, or several joined by commas.
        tokenizer: tokenizer used for both chat formatting and tokenization.
        num_proc: number of worker processes for ``map``/``filter``.
        max_seq_length: examples tokenizing to >= this many ids are dropped.
        sampling: fraction of each dataset to keep; values < 1.0 subsample
            uniformly at random (unseeded, so non-reproducible across runs).

    Returns:
        A single tokenized dataset (concatenation when several were given).
    """
    # str.split already yields a one-element list when no comma is present,
    # so the previous explicit no-comma branch was redundant.
    dataset_names = dataset_name.split(",")

    datasets = [load_single_dataset(name, split='train') for name in dataset_names]

    datasets = [dataset.map(
        lambda x: tokenizer(format_prompt(x, tokenizer), truncation=False),
        batched=False,
        num_proc=num_proc,
        remove_columns=dataset.column_names,
        desc="Pre-tokenizing dataset"
    ) for dataset in datasets]

    # Over-length examples are dropped entirely, not truncated — the old
    # "Truncating dataset" progress label was misleading.
    datasets = [dataset.filter(
        lambda x: len(x['input_ids']) < max_seq_length,
        batched=False,
        num_proc=num_proc,
        desc="Filtering over-length examples"
    ) for dataset in datasets]

    def _subsample(dataset, fraction):
        # Uniform random subset of floor(fraction * len) rows.
        keep = int(len(dataset) * fraction)
        indices = random.sample(range(len(dataset)), keep)
        return dataset.select(indices)

    if sampling < 1.0:
        datasets = [_subsample(dataset, sampling) for dataset in datasets]

    return datasets[0] if len(datasets) == 1 else concatenate_datasets(datasets)


class StopOnTokens(StoppingCriteria):
    """Stopping criterion that ends generation as soon as the most recent
    token of the first sequence equals the tokenizer's EOS token id
    (or a custom ``<|im_end|>`` token id)."""

    def __init__(self, tokenizer):
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        # Only the first sequence in the batch is inspected.
        last_token = input_ids[0][-1]
        return last_token == self.tokenizer.eos_token_id


class EvaluationCallback(TrainerCallback):
    """Trainer callback that periodically generates sample completions so the
    model's qualitative progress can be eyeballed in the training logs.

    Args:
        check_steps: run the generation probe every this many steps;
            0 disables the callback entirely.
    """

    # Fixed probe prompts, already in chat-template form.
    _PROMPTS = (
        """<|im_start|>user\n小红6岁，小明比小红大3岁，小明多少岁？<|im_end|>\n<|im_start|>assistant\n""",
        """<|im_start|>user\n太阳系有多大？<|im_end|>\n<|im_start|>assistant\n""",
    )

    def __init__(self, check_steps: int):
        self.check_steps = check_steps

    def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """Every ``check_steps`` steps, sample the fixed prompts and print them."""
        if self.check_steps == 0 or ((state.global_step + 1) % self.check_steps != 0):
            return

        model = kwargs['model']
        tokenizer = kwargs['processing_class']

        model.eval()
        try:
            for prompt in self._PROMPTS:
                # NOTE(review): args.max_seq_length is not a stock
                # TrainingArguments field — presumably an SFTConfig; confirm.
                self._generate_and_print(model, tokenizer, prompt, args.max_seq_length)
        finally:
            # Restore training mode even if generation raises.
            model.train()

    @staticmethod
    def _generate_and_print(model, tokenizer, prompt, max_seq_length):
        # Generate one sampled completion for a single prompt and print it.
        input_ids = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids.cuda()
        # Bug fix: input_ids is (1, seq_len), so len(input_ids) returned the
        # batch size (1) and the prompt length was never subtracted from the
        # token budget. Use the actual sequence length instead.
        prompt_len = input_ids.shape[-1]
        with torch.no_grad():
            outputs = model.generate(input_ids, max_new_tokens=max_seq_length - prompt_len,
                                     do_sample=True, temperature=0.7)
        print(tokenizer.decode(outputs[0], skip_special_tokens=False))
