from dataclasses import dataclass, field
import json
import math
import logging
import os
from typing import Dict, Optional, List, Sequence, Any
import torch
from torch.utils.data import Dataset
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
import transformers
from transformers import Trainer, GPTQConfig, deepspeed
from transformers.trainer_pt_utils import LabelSmoother
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from accelerate.utils import DistributedType
import numpy as np
import random

from utils.misc import *
from .preprocess import preprocess_sft


class LazySupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning that tokenizes examples lazily.

    Each raw example either is already tokenized (contains "input_ids",
    "labels" and "attention_mask" lists) or holds a "conversations" entry
    that is run through ``preprocess_sft`` on first access.  Results are
    memoized in ``self.cached_data_dict`` so each index is processed at
    most once.

    Args:
        raw_data: Sequence of example dicts (see above for accepted keys).
        tokenizer: Tokenizer forwarded to ``preprocess_sft``.
        max_len: Maximum sequence length forwarded to ``preprocess_sft``.
    """

    def __init__(self, raw_data, tokenizer: "transformers.PreTrainedTokenizer", max_len: int):
        super().__init__()
        # Fix: the original assigned ``self.tokenizer`` twice; once is enough.
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.raw_data = raw_data
        # Per-index memoization of tokenized examples.
        self.cached_data_dict: Dict[int, Dict[str, torch.Tensor]] = {}

        rank0_print("Formatting inputs...Skip in lazy mode")
        # Shared pool consumed by preprocess_sft.
        # NOTE(review): semantics of get_api_pool() live in utils.misc — not
        # visible here; presumably a tool/API registry. Confirm if relied on.
        self.api_pool = get_api_pool()

    def __len__(self) -> int:
        return len(self.raw_data)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        if i in self.cached_data_dict:
            return self.cached_data_dict[i]

        example = self.raw_data[i]
        if "input_ids" in example:
            # Already tokenized upstream: just wrap the lists as tensors.
            # NOTE(review): labels kept as torch.int to match original
            # behavior; loss functions typically expect torch.long — confirm
            # a collator/trainer converts downstream.
            ret = dict(
                input_ids=torch.tensor(example["input_ids"], dtype=torch.int),
                labels=torch.tensor(example["labels"], dtype=torch.int),
                attention_mask=torch.tensor(example["attention_mask"], dtype=torch.bool),
            )
        else:
            system_prompt = example.get("system_prompt", "You are a helpful assistant.")
            out = preprocess_sft(
                [example["conversations"]],
                self.tokenizer,
                self.max_len,
                system_prompt,
                example,
                self.api_pool,
            )
            # preprocess_sft returns batched outputs; take the single row.
            ret = dict(
                input_ids=out["input_ids"][0],
                labels=out["labels"][0],
                attention_mask=out["attention_mask"][0],
            )
        self.cached_data_dict[i] = ret

        return ret


class SupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning over fully pre-tokenized examples.

    Every raw example must already contain "input_ids", "labels" and
    "attention_mask" lists; tensors are built fresh on each access (no
    caching).  ``tokenizer`` and ``max_len`` are accepted only for
    interface parity with the lazy variant and are not used.

    Args:
        raw_data: Sequence of pre-tokenized example dicts.
        tokenizer: Unused; kept for a uniform constructor signature.
        max_len: Unused; kept for a uniform constructor signature.
    """

    def __init__(self, raw_data, tokenizer: "transformers.PreTrainedTokenizer", max_len: int):
        super().__init__()
        self.data = raw_data

    def __len__(self) -> int:
        return len(self.data)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        example = self.data[i]
        # NOTE(review): labels kept as torch.int to match original behavior;
        # loss functions typically expect torch.long — confirm a
        # collator/trainer converts downstream.
        return dict(
            input_ids=torch.tensor(example["input_ids"], dtype=torch.int),
            labels=torch.tensor(example["labels"], dtype=torch.int),
            attention_mask=torch.tensor(example["attention_mask"], dtype=torch.bool),
        )