import copy
import sys
import logging
from typing import Dict, Sequence
import torch
import json
import concurrent.futures
from tqdm import tqdm
import os
from transformers import AutoTokenizer
import pdb
import multiprocessing
from functools import partial

def jload(data_path):
    """Load a JSON-Lines file (one JSON object per line) into a list."""
    records = []
    with open(data_path, 'r', encoding='utf-8') as handle:
        for line in handle:
            records.append(json.loads(line))
    return records

def initialize_tokenizer():
    """Pool-worker initializer: load one tokenizer per worker process.

    Stores the tokenizer in the module-level global `local_HFtokenizer`,
    which tokenize_example reads inside the worker.
    """
    global local_HFtokenizer  # shared with tokenize_example in this process
    model_path = '/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/Qwen/Qwen2.5-1.5B'
    local_HFtokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

def tokenize_example(example):
    """Tokenize one {'input': ..., 'output': ...} record.

    Returns (src, tgt): src is the chat-templated prompt token ids
    (with generation prompt appended), tgt is the response token ids
    followed by the EOS id. Reads the worker-global `local_HFtokenizer`
    set up by initialize_tokenizer.
    """
    prompt_ids = local_HFtokenizer.apply_chat_template(
        [{"role": "user", "content": example['input']}],
        add_generation_prompt=True,
        tokenize=True,
    )
    answer_ids = local_HFtokenizer.encode(example['output'])
    answer_ids.append(local_HFtokenizer.eos_token_id)
    return prompt_ids, answer_ids
def parallel_tokenize(data_dict, HFtokenizer, tokenizer, num_threads):
    """Tokenize all records with a pool of worker processes.

    Each worker loads its own tokenizer via initialize_tokenizer; the
    HFtokenizer/tokenizer parameters are unused and kept only so existing
    callers keep working.

    Returns (sources, targets): parallel tuples of token-id lists.
    """
    # No partial() wrapper needed: tokenize_example takes exactly one argument.
    with multiprocessing.Pool(processes=num_threads, initializer=initialize_tokenizer) as pool:
        results = list(tqdm(pool.imap(tokenize_example, data_dict), total=len(data_dict)))

    if not results:
        # zip(*[]) would raise ValueError on unpacking; keep empty input well-defined.
        return (), ()
    sources, targets = zip(*results)
    return sources, targets

def process_sources_targets(sources, targets, max_length, HFtokenizer, sample_merge):
    """Build fixed-length (max_length + 1) input/label tensor pairs.

    Each label is a copy of its input sequence with the source (prompt)
    portion masked to pad_token_id, so only response tokens carry loss.
    With sample_merge=True, consecutive samples are packed into one
    sequence until adding the next one would reach max_length.

    HFtokenizer is currently unused (pad id is forced to 0); kept for
    interface compatibility.

    Returns (input_ids, target_ids): lists of int64 tensors.
    """
    pad_token_id = 0  # forced to zero, not HFtokenizer.pad_token_id; doubles as the label mask value
    # Hoisted out of the loop: previously pad_token_id was only bound inside
    # the loop body, so an empty `sources` made the final flush a NameError.
    input_ids = []
    target_ids = []
    last_input_ids = []  # accumulator for packed token ids (sample_merge)
    last_target = []     # accumulator for packed labels (sample_merge)
    for src, tgt in tqdm(zip(sources, targets), total=len(sources)):
        input_id = src + tgt
        src_len = len(src)
        label = list(input_id)  # shallow copy suffices for a flat list of ints
        # Mask the prompt so only the response contributes to the loss.
        label[:src_len] = [pad_token_id] * src_len

        if sample_merge:
            if len(last_input_ids) + len(input_id) < max_length:
                # Still fits: keep packing into the current sequence.
                last_input_ids.extend(input_id)
                last_target.extend(label)
            else:
                # Flush the packed sequence. Skip when the accumulator is
                # empty (first sample already >= max_length), which
                # previously emitted a bogus all-pad row.
                if last_input_ids:
                    input_ids.append(pad_and_tensor(last_input_ids, max_length + 1, pad_token_id))
                    target_ids.append(pad_and_tensor(last_target, max_length + 1, pad_token_id))
                last_input_ids = input_id
                last_target = label
        else:
            input_ids.append(pad_and_tensor(input_id, max_length + 1, pad_token_id))
            target_ids.append(pad_and_tensor(label, max_length + 1, pad_token_id))

    # Flush the final partially-filled packed sequence, if any.
    if last_input_ids:
        input_ids.append(pad_and_tensor(last_input_ids, max_length + 1, pad_token_id))
        target_ids.append(pad_and_tensor(last_target, max_length + 1, pad_token_id))

    return input_ids, target_ids

def pad_and_tensor(id_list, max_length, pad_id):
    """Right-pad (or truncate) id_list to exactly max_length; return an int64 tensor."""
    if len(id_list) >= max_length:
        return torch.tensor(id_list[:max_length], dtype=torch.int64)
    padding = [pad_id] * (max_length - len(id_list))
    return torch.tensor(id_list + padding, dtype=torch.int64)

def preprocess_and_save(data_path, HFtokenizer, max_length=30032, num_threads=4, output_path='processed_data.pt', sample_merge=False):
    """End-to-end pipeline: load JSONL, tokenize in parallel, pack/pad, save.

    Writes a dict {'input_ids': [...], 'labels': [...]} of int64 tensors
    to output_path via torch.save.
    """
    logging.warning("Loading data...")
    records = jload(data_path)

    logging.warning("Formatting and tokenizing inputs...")
    sources, targets = parallel_tokenize(records, HFtokenizer, HFtokenizer, num_threads)

    logging.warning("Processing sources and targets...")
    input_ids, labels = process_sources_targets(
        sources, targets, max_length, HFtokenizer, sample_merge=sample_merge
    )

    logging.warning(f"Saving processed data to {output_path}...")
    torch.save({'input_ids': input_ids, 'labels': labels}, output_path)
    logging.warning(f"final num sequences: {len(input_ids)}")
    logging.warning("Processing and saving complete.")

# Example usage: python this_script.py <data.jsonl> <max_length>
if __name__ == "__main__":
    # The guard is required here: multiprocessing's spawn start method
    # re-imports this module in every worker, and without the guard each
    # worker would re-run the whole pipeline (and re-read sys.argv).
    data_path = sys.argv[1]
    max_length = int(sys.argv[2])
    sample_merge = True
    tokenizer_path = '/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/Qwen/Qwen2.5-1.5B'
    HFtokenizer = AutoTokenizer.from_pretrained(tokenizer_path, trust_remote_code=True)
    preprocess_and_save(
        data_path,
        HFtokenizer,
        output_path=data_path + ".qwen2.sample_merge.pt",
        max_length=max_length,
        sample_merge=sample_merge,
        num_threads=100,
    )