
from dataclasses import dataclass, field
import json
import math
import logging
import os
from typing import Dict, Optional, List, Sequence, Any
import torch
from torch.utils.data import Dataset
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
import transformers
from transformers import Trainer, GPTQConfig, deepspeed
from transformers.trainer_pt_utils import LabelSmoother
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from accelerate.utils import DistributedType
import numpy as np
import random
import copy

from utils.misc import *
from config.special_tokens import SPECIAL_TOKENS_MAP, ROLES_MAP

IGNORE_TOKEN_ID = LabelSmoother.ignore_index


def preprocess_sft(
    sources,
    tokenizer: transformers.PreTrainedTokenizer,
    max_len: int,
    system_message: Any = "You are a helpful assistant.",
    extra_info=None,
    api_pool=None,
) -> Dict:
    """Tokenize conversations for supervised fine-tuning.

    Each conversation in ``sources`` is normalized via ``glaive2qwen`` and
    rendered into token ids plus a label sequence in which non-assistant
    spans are masked with ``IGNORE_TOKEN_ID`` (see ``get_targets``).

    Args:
        sources: list of conversations, each a list of turn dicts.
        tokenizer: tokenizer used for encoding; must define ``pad_token_id``.
        max_len: maximum sequence length; longer sequences are truncated.
        system_message: system prompt prepended to every conversation; a
            leading ``"SYSTEM: "`` prefix is stripped.
        extra_info: unused; kept for interface compatibility.
        api_pool: unused; kept for interface compatibility.

    Returns:
        dict with ``input_ids``, ``labels`` and ``attention_mask`` tensors of
        shape (num_conversations, batch_max_len).
    """
    assert "LLM_NAME" in os.environ and os.environ["LLM_NAME"] in SPECIAL_TOKENS_MAP
    llm_name = os.environ["LLM_NAME"]
    special_tokens_map = SPECIAL_TOKENS_MAP[llm_name]
    roles_map = ROLES_MAP[llm_name]

    system_message = system_message.replace("SYSTEM: ", "")
    sources = list(map(glaive2qwen, sources))
    input_ids, targets = [], []
    for source in sources:
        input_ids_, targets_ = get_targets(tokenizer, source, system_message, roles_map, special_tokens_map)

        # DEBUG=mask visualizes which tokens are supervised vs. ignored.
        if "DEBUG" in os.environ and os.environ["DEBUG"] == "mask":
            vis_mask(input_ids_, targets_, tokenizer)
        input_ids.append(input_ids_[:max_len])
        targets.append(targets_[:max_len])

    # Pad ragged sequences to the longest one in the batch: torch.tensor()
    # raises on lists of unequal length. Inputs are padded with pad_token_id
    # (excluded again via attention_mask below) and labels with
    # IGNORE_TOKEN_ID so padding never contributes to the loss.
    batch_max_len = max((len(ids) for ids in input_ids), default=0)
    input_ids = [ids + [tokenizer.pad_token_id] * (batch_max_len - len(ids)) for ids in input_ids]
    targets = [tgt + [IGNORE_TOKEN_ID] * (batch_max_len - len(tgt)) for tgt in targets]

    input_ids_out = torch.tensor(input_ids, dtype=torch.int)
    targets_out = torch.tensor(targets, dtype=torch.int)

    return dict(
        input_ids=input_ids_out,
        labels=targets_out,
        attention_mask=input_ids_out.ne(tokenizer.pad_token_id),
    )


def get_targets(tokenizer, convs, system_message, roles_map, special_tokens_map):
    """Encode one conversation into aligned (input_ids, target_ids).

    Every utterance is rendered with the template

        {role_start}{role}{role_end}{between_role_and_content}{content}{eot}{between_eot_and_role_start}

    The target sequence mirrors the input token-for-token, but role names,
    separators, and non-ASSISTANT content are replaced with IGNORE_TOKEN_ID
    so the loss is only computed on assistant replies (plus the structural
    role_start/role_end/eot markers).

    Args:
        tokenizer: tokenizer providing ``encode``.
        convs: list of turn dicts with "from"/"value" keys.
        system_message: system prompt inserted as the first SYSTEM turn.
        roles_map: maps canonical role names to model-specific role strings.
        special_tokens_map: model-specific structural token strings.

    Returns:
        (input_ids_all, target_ids_all) — two equal-length lists of ids.

    Raises:
        ValueError: if the piecewise target encoding diverges in length from
            the encoding of the full utterance.
    """
    utt_template = "{role_start}{role}{role_end}{between_role_and_content}{content}{eot}{between_eot_and_role_start}"

    global_start = special_tokens_map["global_start"]
    role_start = special_tokens_map["role_start"]
    role_end = special_tokens_map["role_end"]
    between_role_and_content = special_tokens_map["between_role_and_content"]
    eot = special_tokens_map["eot"]
    between_eot_and_role_start = special_tokens_map["between_eot_and_role_start"]

    # Drop a leading non-USER turn, then always prepend the system turn.
    # (The original wrapped this in a bare `except:` that only printed the
    # conversation; empty/malformed input is now handled explicitly.)
    if convs and convs[0].get("from") != "USER":
        convs = convs[1:]
    convs = [{"from": "SYSTEM", "value": system_message.lstrip()}] + convs

    input_ids_all, target_ids_all = [], []

    def hyper_encode(input_str):
        # phi3's tokenizer injects a stray token 29871 (empty piece); drop it
        # so piecewise encoding stays aligned with whole-utterance encoding.
        out = tokenizer.encode(input_str)
        if "LLM_NAME" in os.environ and os.environ["LLM_NAME"] == "phi3" and 29871 in out:
            out.remove(29871)
        return out

    for conv in convs:
        role, content = conv["from"], conv["value"]
        utt = utt_template.format(
            role=roles_map[role],
            content=content,
            global_start=global_start,
            role_start=role_start,
            role_end=role_end,
            between_role_and_content=between_role_and_content,
            eot=eot,
            between_eot_and_role_start=between_eot_and_role_start,
        )
        input_ids = hyper_encode(utt)

        # Only assistant content is supervised; all other content is masked.
        if role in ["ASSISTANT"]:
            content_part = hyper_encode(content)
        else:
            content_part = len(hyper_encode(content)) * [IGNORE_TOKEN_ID]

        target_ids = hyper_encode(role_start) + len(hyper_encode(roles_map[role])) * [IGNORE_TOKEN_ID] + hyper_encode(role_end) + \
            len(hyper_encode(between_role_and_content)) * [IGNORE_TOKEN_ID] + \
            content_part + \
            hyper_encode(eot) + \
            len(hyper_encode(between_eot_and_role_start)) * [IGNORE_TOKEN_ID]

        if len(input_ids) != len(target_ids):
            # Visualize the misalignment for debugging, then fail loudly
            # instead of dropping into an interactive IPython shell, which
            # would hang an unattended training run.
            vis_mask(input_ids, target_ids, tokenizer)
            raise ValueError(
                f"input/target length mismatch: {len(input_ids)} vs {len(target_ids)}"
            )
        input_ids_all += input_ids
        target_ids_all += target_ids

    input_ids_all = hyper_encode(global_start) + input_ids_all
    target_ids_all = hyper_encode(global_start) + target_ids_all

    return input_ids_all, target_ids_all


def vis_mask(input_ids, target_ids, tokenizer):
    """Debug helper: print the decoded input text, then the decoded target
    text with every IGNORE_TOKEN_ID position replaced by '#' so the loss
    mask is visible.

    Args:
        input_ids: list of input token ids.
        target_ids: list of target token ids (may contain IGNORE_TOKEN_ID).
        tokenizer: tokenizer providing ``encode``/``decode``.
    """
    # NOTE: fixed the unbalanced parentheses in the original log message.
    rank0_print(f"原始文本({len(input_ids)} tokens)=============")
    rank0_print(tokenizer.decode(input_ids))
    # Token ids are ints, so an element-wise rebuild suffices (no deepcopy);
    # also hoists the '#' encoding out of the loop.
    mask_token = tokenizer.encode("#")[0]
    target_vis = [mask_token if t == IGNORE_TOKEN_ID else t for t in target_ids]
    rank0_print(f"mask后的文本({len(target_ids)})=============")
    rank0_print(tokenizer.decode(target_vis))

def glaive2qwen(convs):
    """Normalize a Glaive-style conversation, in place, to the qwen schema.

    Every turn ends up with "from"/"value" keys, roles canonicalized to
    upper case, trailing " <|endoftext|>" markers removed, and assistant
    function calls rewrapped in <|function_start|>/<|function_end|>.

    Args:
        convs: list of turn dicts keyed either "role"/"content" or
            "from"/"value".

    Returns:
        The same list object, with its turn dicts mutated.

    Raises:
        ValueError: if a turn has neither key pair.
    """
    canonical_roles = ("SYSTEM", "USER", "ASSISTANT", "FUNCTION RESPONSE")
    for turn in convs:
        # Detect which key pair this turn uses.
        if "role" in turn and "content" in turn:
            role_key, content_key = "role", "content"
        elif "from" in turn and "value" in turn:
            role_key, content_key = "from", "value"
        else:
            print(turn)
            raise ValueError
        # Collapse the role string to the first canonical role it contains.
        matched = next((r for r in canonical_roles if r in turn[role_key]), None)
        if matched is not None:
            turn[role_key] = matched
        turn[role_key] = turn[role_key].upper()
        # Mirror onto the "from"/"value" keys expected downstream.
        turn["from"] = turn[role_key]
        turn["value"] = turn[content_key]
        if " <|endoftext|>" in turn["value"]:
            turn["value"] = turn["value"].replace(" <|endoftext|>", "")
        if turn["from"] == "ASSISTANT" and "<functioncall>" in turn["value"]:
            turn["value"] = turn["value"].replace("<functioncall>", "<|function_start|>") + "<|function_end|>"
    return convs
