import json
from tqdm import tqdm

import datasets
import transformers
import os,shutil,pathlib


def preprocess(tokenizer, config, example, max_seq_length):
    """Tokenize one {"context", "target"} example into model input ids.

    The prompt ("context") and the answer ("target") are encoded separately;
    the target is encoded without special tokens so the two id sequences can
    be concatenated directly, and the model's EOS token id is appended.

    Returns a dict with the combined "input_ids" and "seq_len" — the length
    of the encoded prompt (used downstream to mask the prompt in the labels).
    """
    context_ids = tokenizer.encode(
        example["context"], max_length=max_seq_length, truncation=True)
    answer_ids = tokenizer.encode(
        example["target"],
        max_length=max_seq_length,
        truncation=True,
        add_special_tokens=False)
    combined = context_ids + answer_ids + [config.eos_token_id]
    return {"input_ids": combined, "seq_len": len(context_ids)}



def read_jsonl(model_name, path, max_seq_length, callback, skip_overlength=False):
    """Yield tokenized features from a JSONL file of {"context", "target"} records.

    Args:
        model_name: HF model id or local path used to load the tokenizer/config.
        path: path to the input .jsonl file (one JSON object per line).
        max_seq_length: per-field truncation length; combined ids are also
            clipped to this length.
        callback: optional progress callable, invoked as callback(step, total).
        skip_overlength: if True, drop examples whose combined ids exceed
            max_seq_length instead of clipping them.

    Yields:
        dicts with "input_ids" and "seq_len" (see preprocess).
    """
    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_name, trust_remote_code=True)
    config = transformers.AutoConfig.from_pretrained(
        model_name, trust_remote_code=True, device_map='auto')
    # Explicit UTF-8: JSONL files are UTF-8 by convention; don't depend on locale.
    # Read everything up front (total count is needed for progress reporting),
    # then close the file instead of holding it open for the whole loop.
    with open(path, "r", encoding="utf-8") as f:
        lines = f.readlines()
    total = len(lines)  # hoisted: was recomputed on every iteration
    step = 0
    for line in tqdm(lines, desc="tokenize..."):
        example = json.loads(line)
        feature = preprocess(tokenizer, config, example, max_seq_length)
        if skip_overlength and len(feature["input_ids"]) > max_seq_length:
            continue
        feature["input_ids"] = feature["input_ids"][:max_seq_length]
        step += 1
        if callback is not None:
            callback(step, total)
        yield feature


def tokenize(model_name, input, output, max_seq_length, callback, skip_overlength=False):
    """Tokenize a JSONL file and save the result as a HF `datasets` directory.

    Args:
        model_name: HF model id or local path for the tokenizer/config.
        input: path to the source .jsonl file. (NOTE: shadows the builtin
            `input`; name kept for backward compatibility with callers.)
        output: directory where the tokenized dataset is written; removed
            first if it already exists.
        max_seq_length: maximum token length per example.
        callback: optional progress callable, called as callback(step, total).
        skip_overlength: drop examples longer than max_seq_length when True.
    """
    print(f"{input} --> {output} --> {model_name} --> {max_seq_length} --> {skip_overlength}")
    # Remove any previous output so save_to_disk starts from a clean slate.
    # os.path.isdir already implies existence — no separate exists() check needed.
    if os.path.isdir(output):
        shutil.rmtree(output)
    # Clear the HF datasets cache so from_generator re-runs the generator
    # instead of reusing a stale cached dataset.
    # NOTE(review): this wipes the *entire* datasets cache for every project
    # on the machine, not just this dataset — consider passing a scoped
    # cache_dir to datasets instead of deleting the global cache.
    dataset_cache = os.path.join(str(pathlib.Path.home()), ".cache", "huggingface", "datasets")
    if os.path.isdir(dataset_cache):
        shutil.rmtree(dataset_cache)

    dataset = datasets.Dataset.from_generator(
        lambda: read_jsonl(model_name, input, max_seq_length, callback, skip_overlength)
    )
    dataset.save_to_disk(output)




if __name__ == "__main__":
    # Parameter names match how read_jsonl actually invokes the hook:
    # callback(step, total). (They were previously misnamed percent/message.)
    def callback(step, total):
        pass

    tokenize(
        "/Users/sakoo/Workspace/AI/AutoChatGML/models/chatglm-6b",
        "data/alpaca_data.jsonl",
        "data/alpaca_data",
        200,
        callback,
    )