

import json
import os
from typing import Dict, List
import transformers

import numpy as np
import random

from utils.misc import *
from .sft_dataset import LazySupervisedDataset, SupervisedDataset

from datasets import load_dataset, concatenate_datasets
import datasets
from copy import deepcopy
import wcwidth



def make_supervised_data_module(
    tokenizer: transformers.PreTrainedTokenizer, data_args, max_len: int,
) -> Dict:
    """Make train/eval datasets for supervised fine-tuning.

    Args:
        tokenizer: tokenizer forwarded to the dataset class.
        data_args: argument namespace; this function reads
            ``lazy_preprocess``, ``data_path`` (via
            ``load_train_data_from_args``) and ``eval_data_path``.
        max_len: maximum sequence length forwarded to the dataset class.

    Returns:
        dict with keys ``train_dataset`` and ``eval_dataset``
        (``eval_dataset`` is ``None`` when no eval path is configured).
    """
    dataset_cls = (
        LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset
    )
    rank0_print("Loading data...")

    train_json = load_train_data_from_args(data_args)
    train_dataset = dataset_cls(train_json, tokenizer=tokenizer, max_len=max_len)

    if data_args.eval_data_path:
        # Context manager so the handle is closed deterministically; the
        # original `json.load(open(...))` left closing to the GC.  Explicit
        # utf-8 matches read_json/read_jsonl in this module.
        with open(data_args.eval_data_path, "r", encoding="utf-8") as f:
            eval_json = json.load(f)
        eval_dataset = dataset_cls(eval_json, tokenizer=tokenizer, max_len=max_len)
    else:
        eval_dataset = None

    return dict(train_dataset=train_dataset, eval_dataset=eval_dataset)

def read_json(path):
    """Parse a UTF-8 encoded JSON file and return the decoded object."""
    with open(path, mode="r", encoding="utf-8") as handle:
        text = handle.read()
    return json.loads(text)

def read_jsonl(path):
    """Read a JSON-Lines file (one JSON document per line) into a list."""
    with open(path, "r", encoding="utf-8") as handle:
        return [json.loads(line) for line in handle]

def auto_read(path):
    """Dispatch on file extension: ``.json`` -> read_json, ``.jsonl`` -> read_jsonl.

    Args:
        path: file path ending in ``.json`` or ``.jsonl``.

    Returns:
        The parsed content of the file.

    Raises:
        ValueError: for any other extension.  The original implementation
        fell off the end and silently returned ``None``, which surfaced
        later as a confusing downstream failure.
    """
    if path.endswith(".jsonl"):
        return read_jsonl(path)
    if path.endswith(".json"):
        return read_json(path)
    raise ValueError(
        f"Unsupported file extension for {path!r}; expected .json or .jsonl"
    )

def load_train_data_from_args(data_args):
    """
        Load every sub-dataset listed in ``data_args.data_path``, apply
        per-dataset over-/under-sampling, and drop samples containing an
        empty conversation turn.

        ``data_path`` is comma-separated; each entry is ``path`` or
        ``path#weight``.  A weight <= 1 downsamples to that fraction of the
        dataset; a weight > 1 must be an integer and repeats the whole
        dataset that many times.  Supported path kinds: ``.jsonl`` (loaded
        through ``datasets.load_dataset``), ``.json`` (plain ``json.load``),
        anything else is treated as an on-disk HF dataset directory.
    """
    paths_and_weights = data_args.data_path.split(",")
    paths, weights = [], []
    for path_and_weight in paths_and_weights:
        parts = path_and_weight.split("#")
        if len(parts) == 1:
            # No "#weight" suffix: keep the dataset at its natural size.
            paths.append(parts[0])
            weights.append(1.0)
        elif len(parts) == 2:
            paths.append(parts[0])
            weights.append(float(parts[1]))
        else:
            raise ValueError

    def get_sample_inds(dataset_len, sample_weight):
        # Return the indices (possibly repeated across passes) to keep for
        # one dataset of length `dataset_len`.
        inds = list(range(dataset_len))
        replace = False
        out = []
        if sample_weight <= 1:
            # Undersample: a random fraction, without replacement.
            sample_num = int(dataset_len * sample_weight)
            out.extend(list(map(int, np.random.choice(inds, sample_num, replace=replace))))
        else:
            # Oversample: only whole-number multiples are supported.  Each
            # pass draws dataset_len indices without replacement, i.e. a
            # full permutation of the dataset per repetition.
            assert int(sample_weight) - sample_weight == 0, "请确保是整数倍过采样"
            for _ in range(int(sample_weight)):
                sample_num = dataset_len
                out.extend(list(map(int, np.random.choice(inds, sample_num, replace=replace))))
        return out

    train_data = None
    # Per-dataset bookkeeping [name, original_len, weight, sampled_len];
    # collected but not returned — presumably for debugging/logging.
    train_data_infos = []
    cache_dir = "./hf_cache/traindata_sft_jsonls"
    for path, weight in zip(paths, weights):
        if path.endswith(".jsonl"):
            data = load_dataset("json", data_files=path, cache_dir=cache_dir, num_proc=128, split="train")
        elif path.endswith(".json"):
            data = auto_read(path)
        else:
            # Assumed to be a saved HF dataset directory.
            # NOTE(review): `load_from_disk` can return a Dataset (not a
            # DatasetDict), in which case ["train"] would index a column —
            # confirm the on-disk layout is a DatasetDict.
            print(path)
            data = datasets.load_from_disk(path, keep_in_memory=False)["train"]
        select_inds = get_sample_inds(len(data), weight)
        rank0_print(f"{os.path.basename(path)} length: {len(data)}## downsample length {len(select_inds)}")
        train_data_infos.append([os.path.basename(path), len(data), weight, len(select_inds)])
        # Arrow datasets are concatenated via `concatenate_datasets`;
        # everything else is materialized into a Python list.
        # NOTE(review): data loaded from .jsonl is also an arrow Dataset but
        # is deliberately routed to the list branch below — confirm intended.
        if isinstance(data, datasets.arrow_dataset.Dataset) and not path.endswith(".jsonl"):
            if weight == 1:
                # weight 1 keeps the dataset unsampled and in original order.
                sample_data = data
            else:
                sample_data = data.select(select_inds)
            if train_data is None:
                train_data = sample_data
            else:
                if isinstance(train_data, datasets.arrow_dataset.Dataset):
                    train_data = concatenate_datasets([train_data, sample_data])
                else:
                    # Earlier datasets were plain lists; fall back to list append.
                    train_data += sample_data.to_list()
        else:
            if weight == 1:
                sample_data = [data[ind] for ind in range(len(data))]
            else:
                sample_data = [data[ind] for ind in select_inds]
            if train_data is None:
                train_data = sample_data
            else:
                train_data += sample_data
    
    def has_empty_turn(item):
        # True if the sample has no conversation at all, or any turn whose
        # text (under "content" or "value") is empty/whitespace-only.
        has_empty = False
        if len(item["conversations"]) == 0:
            return True
        for conv in item["conversations"]:
            content_key = None
            if "content" in conv:
                content_key = "content"
            elif "value" in conv:
                content_key = "value"
            else:
                raise NotImplementedError
        
            if len(conv[content_key].strip()) == 0:
                has_empty = True
                break
        return has_empty

    # Filter out samples with empty turns; report how many were dropped.
    train_data_filtered = []
    has_empty_items = []
    for item in train_data:
        if has_empty_turn(item):
            has_empty_items.append(item)
        else:
            train_data_filtered.append(item)
    print(f"{len(has_empty_items)} samples has empty turn")
    return train_data_filtered

