import copy
import sys
import logging
from typing import Dict, Sequence
import torch
import json
import concurrent.futures
from tqdm import tqdm
import os
from transformers import AutoTokenizer
import pdb 
import multiprocessing
from functools import partial

def jload(data_path):
    """Load a JSON-Lines file (one JSON object per line) into a list of dicts."""
    records = []
    with open(data_path, 'r', encoding='utf-8') as fh:
        for line in fh:
            records.append(json.loads(line))
    return records
    
def tokenize_example(example, HFtokenizer, tokenizer):
    """Tokenize one example into (source_ids, target_ids, task_tag).

    The source is the ChatGLM3-style chat prompt: the tokenizer's prefix
    tokens, a "user" message built from example['input'], then the
    <|assistant|> command token. The target is example['output'] encoded by
    the inner tokenizer, terminated with its EOS id.

    Args:
        example: dict with 'input', 'output' and optionally 'task_tag'.
        HFtokenizer: HF chat tokenizer exposing get_prefix_tokens,
            build_single_message and get_command.
        tokenizer: inner tokenizer exposing encode() and eos_id.

    Returns:
        (src, tgt, task_tag) — two token-id lists plus the task tag
        (None when the example carries none).
    """
    src = (HFtokenizer.get_prefix_tokens()
           + HFtokenizer.build_single_message("user", "", example['input'])
           + [HFtokenizer.get_command("<|assistant|>")])
    tgt = tokenizer.encode(example['output']) + [tokenizer.eos_id]
    # dict.get replaces the original 'in'-check + index: one lookup, same result.
    task_tag = example.get('task_tag')
    return src, tgt, task_tag

def parallel_tokenize(data_dict, HFtokenizer, tokenizer, num_threads):
    """Tokenize all examples across a process pool.

    Binds the tokenizers onto tokenize_example with functools.partial so the
    pool only has to ship each example to the workers, then unzips the
    per-example (src, tgt, tag) triples.

    Returns:
        (sources, targets, tags) — three parallel tuples.
    """
    worker = partial(tokenize_example, HFtokenizer=HFtokenizer, tokenizer=tokenizer)
    with multiprocessing.Pool(processes=num_threads) as pool:
        triples = pool.map(worker, data_dict)
    srcs, tgts, task_tags = zip(*triples)
    return srcs, tgts, task_tags

def process_sources_targets(sources, targets, tags, max_length, tokenizer, sample_merge, pad_token_id=None):
    """Turn tokenized (src, tgt) pairs into fixed-length input/label tensors.

    Args:
        sources: per-example source (prompt) token-id lists.
        targets: per-example target (response) token-id lists.
        tags: per-example task tags (may contain None).
        max_length: target sequence length; tensors are padded/truncated to
            max_length + 1 (the extra token is presumably for the shifted
            label — TODO confirm against the training loop).
        tokenizer: inner tokenizer; its pad_id masks the prompt part of labels.
        sample_merge: when True, pack consecutive samples into one sequence of
            up to max_length tokens instead of one sample per sequence.
        pad_token_id: id used to pad sequences to length. Defaults to the
            module-level HFtokenizer's pad_token_id — the original code read
            that global directly (a latent NameError when imported elsewhere);
            callers can now pass the pad id explicitly.

    Returns:
        (input_ids, target_ids, task_tags): lists of int64 tensors of length
        max_length + 1, plus one tag list per emitted sequence.
    """
    if pad_token_id is None:
        # Backward-compatible fallback: preserves the original global lookup.
        pad_token_id = HFtokenizer.pad_token_id

    input_ids = []
    target_ids = []
    task_tags = []
    last_input_ids = []  # packing accumulator, used only when sample_merge
    last_target = []
    last_tags = []
    for src, tgt, task_tag in tqdm(zip(sources, targets, tags)):
        input_id = src + tgt
        src_len = len(src)
        label = copy.deepcopy(input_id)
        # Mask prompt tokens so loss is only computed on the response.
        label[:src_len] = [tokenizer.pad_id] * src_len
        if len(label) > max_length + 1:
            continue  # skip over-long samples rather than truncating them
        if sample_merge:
            if len(last_input_ids) + len(input_id) < max_length:
                # Still room: pack this sample into the current sequence.
                last_input_ids.extend(input_id)
                last_target.extend(label)
                last_tags.append(task_tag)
            else:
                # BUG FIX: the original flushed unconditionally, so an empty
                # accumulator (single sample already >= max_length) emitted an
                # all-pad garbage sequence. Only flush real content.
                if last_input_ids:
                    input_ids.append(pad_and_tensor(last_input_ids, max_length + 1, pad_token_id))
                    target_ids.append(pad_and_tensor(last_target, max_length + 1, pad_token_id))
                    task_tags.append([0])  # merged sequences carry no task tag
                last_input_ids = input_id
                last_target = label
                last_tags = [task_tag]
        else:
            input_ids.append(pad_and_tensor(input_id, max_length + 1, pad_token_id))
            target_ids.append(pad_and_tensor(label, max_length + 1, pad_token_id))
            task_tags.append([task_tag])

    # Flush the trailing partially-filled sequence. The accumulator is only
    # ever non-empty in sample_merge mode, so the original's
    # `if not sample_merge` branch here was dead code.
    if last_input_ids:
        input_ids.append(pad_and_tensor(last_input_ids, max_length + 1, pad_token_id))
        target_ids.append(pad_and_tensor(last_target, max_length + 1, pad_token_id))
        task_tags.append([0])

    return input_ids, target_ids, task_tags

def pad_and_tensor(id_list, max_length, pad_id):
    """Right-pad (or truncate) id_list to exactly max_length; return an int64 tensor."""
    if len(id_list) >= max_length:
        padded = id_list[:max_length]
    else:
        padded = id_list + [pad_id] * (max_length - len(id_list))
    return torch.tensor(padded, dtype=torch.int64)

def preprocess_and_save(data_path, HFtokenizer, max_length=30032, num_threads=4, output_path='processed_data.pt', sample_merge=False):
    """End-to-end preprocessing: load a jsonl file, tokenize it in parallel,
    pack/pad into tensors, and save the result with torch.save.

    The saved dict has keys 'input_ids', 'labels' and 'task_tags'.
    """
    logging.warning("Loading data...")
    examples = jload(data_path)

    logging.warning("Formatting and tokenizing inputs...")
    inner_tokenizer = HFtokenizer.tokenizer
    srcs, tgts, tag_list = parallel_tokenize(examples, HFtokenizer, inner_tokenizer, num_threads)

    logging.warning("Processing sources and targets...")
    input_ids, labels, task_tags = process_sources_targets(srcs, tgts, tag_list, max_length, inner_tokenizer, sample_merge=sample_merge)

    logging.warning(f"Saving processed data to {output_path}...")
    torch.save({'input_ids': input_ids, 'labels': labels, 'task_tags': task_tags}, output_path)
    logging.warning(f"final num sequences: {len(input_ids)}")
    logging.warning("Processing and saving complete.")

# Script entry point.
# CLI: python <script> <data_path> <max_length> [true]
#   - third arg, when the literal string 'true', enables sample merging.
# BUG FIX: the original had no __main__ guard although it spawns a
# multiprocessing.Pool (required on spawn-start platforms), and carried two
# dead data_path assignments that sys.argv[1] immediately overwrote.
if __name__ == "__main__":
    data_path = sys.argv[1]
    max_length = int(sys.argv[2])
    sample_merge = len(sys.argv) >= 4 and sys.argv[3] == 'true'
    sample_merge_str = 'no_sample_merge' if not sample_merge else 'sample_merge'

    # NOTE: HFtokenizer stays a module-level global on purpose —
    # process_sources_targets falls back to it for the pad token id.
    HFtokenizer = AutoTokenizer.from_pretrained(
        '/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/thudm/chatglm3-6b-base',
        trust_remote_code=True,
    )
    preprocess_and_save(
        data_path,
        HFtokenizer,
        output_path=data_path + f".glm3.{sample_merge_str}.pt",
        max_length=max_length,
        sample_merge=sample_merge,
        num_threads=100,
    )
