import copy
import sys
import logging
from typing import Dict, Sequence
import torch
import json
import concurrent.futures
from tqdm import tqdm
import os
from transformers import AutoTokenizer
import pdb
import multiprocessing
from functools import partial

def jload(data_path):
    """Load a JSON-Lines file: one JSON object per non-empty line, in order."""
    records = []
    with open(data_path, 'r', encoding='utf-8') as fp:
        for line in fp:
            records.append(json.loads(line))
    return records
def tokenize_example(example, HFtokenizer, tokenizer):
    """Tokenize one {'input', 'output', optional 'task_tag'} record.

    Returns (src, tgt, task_tag):
      src -- token ids of the chat-templated user prompt (generation prompt appended)
      tgt -- token ids of the answer plus an explicit EOS so generation learns to stop
      task_tag -- example['task_tag'] if present, else None

    `tokenizer` is unused but kept for interface compatibility; the inner
    tokenizer is reached via HFtokenizer.tokenizer instead.
    """
    history = [
        {"role": "user", "content": example['input']}
    ]
    # [0] unwraps a batch dimension — NOTE(review): this relies on the GLM-4
    # remote-code tokenizer returning a batched result from apply_chat_template
    # with tokenize=True; confirm if the tokenizer/model is swapped.
    src = HFtokenizer.apply_chat_template(history, add_generation_prompt=True, tokenize=True)[0]
    tgt = HFtokenizer.tokenizer.encode(example['output']) + [HFtokenizer.eos_token_id]
    task_tag = example.get('task_tag')  # None when the sample carries no tag

    return src, tgt, task_tag
def parallel_tokenize(data_dict, HFtokenizer, tokenizer, num_threads):
    """Tokenize every record of data_dict with a process pool.

    Returns three parallel tuples (sources, targets, tags), one entry per
    record, produced by tokenize_example.

    NOTE(review): HFtokenizer must be picklable for multiprocessing — holds
    for HF tokenizers loaded from disk, but verify for remote-code tokenizers.
    """
    worker = partial(tokenize_example, HFtokenizer=HFtokenizer, tokenizer=tokenizer)
    with multiprocessing.Pool(processes=num_threads) as pool:
        results = pool.map(worker, data_dict)

    if not results:
        # zip(*[]) would raise ValueError on the 3-way unpack below
        return (), (), ()
    sources, targets, tags = zip(*results)
    return sources, targets, tags


def process_sources_targets(sources, targets, tags, max_length, tokenizer, sample_merge):
    """Build padded (input_ids, target_ids, task_tags) tensor lists.

    Each sample becomes src + tgt token ids; its label is a copy with the
    prompt positions masked to pad_token_id so loss is only computed on the
    target. Samples longer than max_length + 1 are dropped (no truncation).

    With sample_merge=True, consecutive samples are packed into one sequence
    up to max_length before padding; merged sequences get dummy task tag [0].
    Without merging, every kept sample is padded to max_length + 1 on its own
    and tagged [task_tag].

    `tokenizer` is unused but kept for interface compatibility.
    """
    # NOTE(review): prompt positions are masked with 0, not -100 — the
    # training side must ignore id 0 in the labels for this to be correct.
    pad_token_id = 0
    input_ids = []
    target_ids = []
    task_tags = []
    # Running buffers for sample_merge packing.
    last_input_ids = []
    last_target = []
    last_tags = []
    for src, tgt, task_tag in tqdm(zip(sources, targets, tags)):
        input_id = src + tgt
        src_len = len(src)
        label = copy.deepcopy(input_id)
        label[:src_len] = [pad_token_id] * src_len  # mask the prompt tokens
        if len(label) > max_length + 1:
            continue  # drop over-long samples instead of truncating
        if sample_merge:
            if len(last_input_ids) + len(input_id) < max_length:
                # Still room: pack this sample into the running buffer.
                last_input_ids.extend(input_id)
                last_target.extend(label)
                last_tags.append(task_tag)
            else:
                # Flush the full buffer, then restart it with this sample.
                # Guard against an empty buffer (first sample already at/over
                # max_length), which would otherwise emit an all-pad sequence.
                if last_input_ids:
                    input_ids.append(pad_and_tensor(last_input_ids, max_length + 1, pad_token_id))
                    target_ids.append(pad_and_tensor(last_target, max_length + 1, pad_token_id))
                    task_tags.append([0])  # merged sequences don't use task tags
                last_input_ids = input_id
                last_target = label
                last_tags = [task_tag]
        else:
            input_ids.append(pad_and_tensor(input_id, max_length + 1, pad_token_id))
            target_ids.append(pad_and_tensor(label, max_length + 1, pad_token_id))
            task_tags.append([task_tag])

    # Flush the trailing merge buffer; it is only non-empty when
    # sample_merge=True, so it always gets the dummy [0] tag.
    if last_input_ids:
        input_ids.append(pad_and_tensor(last_input_ids, max_length + 1, pad_token_id))
        target_ids.append(pad_and_tensor(last_target, max_length + 1, pad_token_id))
        task_tags.append([0])
    return input_ids, target_ids, task_tags

def pad_and_tensor(id_list, max_length, pad_id):
    """Right-pad id_list with pad_id to max_length (or truncate if longer)
    and return it as an int64 tensor."""
    if len(id_list) >= max_length:
        return torch.tensor(id_list[:max_length], dtype=torch.int64)
    padding = [pad_id] * (max_length - len(id_list))
    return torch.tensor(id_list + padding, dtype=torch.int64)

def preprocess_and_save(data_path, HFtokenizer, max_length=30032, num_threads=4, output_path='processed_data.pt', sample_merge=False):
    """End-to-end pipeline: load JSONL -> tokenize in parallel -> mask/pack/pad
    -> save {'input_ids', 'labels', 'task_tags'} to output_path via torch.save."""
    logging.warning("Loading data...")
    samples = jload(data_path)

    logging.warning("Formatting and tokenizing inputs...")
    inner_tokenizer = HFtokenizer.tokenizer
    sources, targets, tags = parallel_tokenize(samples, HFtokenizer, inner_tokenizer, num_threads)

    logging.warning("Processing sources and targets...")
    input_ids, labels, task_tags = process_sources_targets(sources, targets, tags, max_length, inner_tokenizer, sample_merge=sample_merge)

    logging.warning(f"Saving processed data to {output_path}...")
    payload = {'input_ids': input_ids, 'labels': labels, 'task_tags': task_tags}
    torch.save(payload, output_path)
    logging.warning(f"final num sequences: {len(input_ids)}")
    logging.warning("Processing and saving complete.")

# Example usage
data_path = '/apdcephfs/share_976139/users/adrenzhou/nlp_workdir/ChatGLM-Tuning/data/data_v15.2.train.jsonl.hunyuan.json'
data_path = "debug.json"
data_path = sys.argv[1]
max_length = int(sys.argv[2])
sample_merge = False
if len(sys.argv) >= 4:
    sample_merge = True if sys.argv[3] == 'true' else False
sample_merge_str = 'no_sample_merge' if not sample_merge else 'sample_merge'

HFtokenizer = '/teaspeech_ceph/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/thudm/glm-4-9b'
HFtokenizer = AutoTokenizer.from_pretrained(HFtokenizer, trust_remote_code=True)
preprocess_and_save(data_path, HFtokenizer, output_path=data_path + f".glm4.mg.{sample_merge_str}.pt", max_length=max_length, sample_merge=sample_merge, num_threads=32)
