# Copyright 2024 CHINA MERCHANTS BANK CO., LTD.
# Copyright 2024 Huawei Technologies Co., Ltd
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================


import os
import time
import argparse
import numpy as np
from tqdm import tqdm
from functools import partial
from datasets import load_dataset, Dataset
from mindspore.mindrecord import FileWriter


def chunks(lst, n):
    """Yield consecutive slices of *lst*, each of length *n* (the final slice may be shorter)."""
    start = 0
    total = len(lst)
    while start < total:
        yield lst[start:start + n]
        start += n


def process_func_tokenized(data_point, tokenizer, seq_length, model, repeat=1):
    """Tokenize a batch of rows and pack the token stream into fixed-length samples.

    Args:
        data_point: batched mapping with a 'text' column (list of rows).
        tokenizer: model tokenizer; called per row.
        seq_length: length of each packed sample; a shorter tail is dropped.
        model: model name selecting the tokenization branch.
        repeat: how many times the whole token stream is duplicated before packing.

    Returns:
        dict with 'input_ids': int32 array of shape (num_full_chunks, seq_length).
    """
    token_stream = []
    for sentence in data_point['text']:
        if model == 'glm12b':
            # NOTE(review): this feeds existing token ids (minus the first two) plus 151329
            # back through the tokenizer — presumably an eos-append for GLM; verify against
            # the GLM tokenizer API.
            token_stream += tokenizer(sentence['input_ids'][2:] + [151329])
        elif model == "qwen2_5":
            # Qwen does not append eos itself (add_eos_token=False), so append it here.
            token_stream += tokenizer(sentence)['input_ids'] + [tokenizer.eos_token_id]
        else:
            token_stream += tokenizer(sentence)['input_ids']

    # Duplicate the whole stream `repeat` times before packing.
    repeated = token_stream * repeat

    # Keep only full-length windows; the trailing partial chunk is discarded.
    n_full = len(repeated) // seq_length
    packed = [repeated[i * seq_length:(i + 1) * seq_length] for i in range(n_full)]

    return {"input_ids": np.array(packed, dtype=np.int32)}


def pretrain_dataset_process(model, ori_data_file_path, output_data_file_path, tokenizer, seq_length,
                             num_proc=100, file_partition=1, parallel_writer=False):
    """Tokenize jsonl pretraining data and write it out as MindRecord files.

    Args:
        model: model name; selects the tokenization branch in ``process_func_tokenized``.
        ori_data_file_path: a single .jsonl file, or a directory walked recursively
            for files ending in 'jsonl'.
        output_data_file_path: output path; must end with '.mindrecord'. A sibling
            '.info' file with the record count is written next to it.
        tokenizer: tokenizer forwarded to ``process_func_tokenized``.
        seq_length: fixed sample length; partial tail chunks are dropped.
        num_proc: number of worker processes for ``datasets.map``.
        file_partition: number of MindRecord shard files.
        parallel_writer: forwarded to ``FileWriter.write_raw_data``.

    Raises:
        ValueError: if ``output_data_file_path`` does not end with '.mindrecord'.
    """
    suffix = ".mindrecord"
    # Validate up front. The original code used `assert` (stripped under `python -O`)
    # and only checked AFTER committing the writer, i.e. after all the work was done.
    if not output_data_file_path.endswith(suffix):
        raise ValueError(f"output_data_file_path must end with '{suffix}', got: {output_data_file_path}")

    # Collect the input jsonl file list.
    if os.path.isdir(ori_data_file_path):
        json_list = [os.path.join(path, file_name)
                     for path, _, file_list in os.walk(ori_data_file_path)
                     for file_name in file_list
                     if file_name.endswith('jsonl')]
    else:
        json_list = [ori_data_file_path]
    print(json_list, flush=True)

    datasets_example = load_dataset("json", data_files=json_list, split="train")
    tokenized_dataset_example = datasets_example.map(
        partial(process_func_tokenized, tokenizer=tokenizer, seq_length=seq_length, model=model),
        batched=True, num_proc=num_proc, remove_columns=datasets_example.column_names)

    time1 = time.time()
    pd_dataset = tokenized_dataset_example.to_pandas()
    print('tokenized_dataset_example', tokenized_dataset_example, flush=True)
    print(f"convert pandas time is: {time.time() - time1}")

    # Save the preprocessed samples with the MindRecord API.
    schema = {'input_ids': {"type": "int32", "shape": [-1]}}
    writer = FileWriter(file_name=output_data_file_path,
                        shard_num=file_partition,
                        overwrite=True)
    writer.add_schema(schema, desc="pt_dataset")

    total = len(pd_dataset)
    transform_steps = 10000  # rows per write_raw_data call; keeps peak memory bounded
    with tqdm(total=total, desc="Writing mindrecords") as pbar:
        for start in range(0, total, transform_steps):
            end = min(start + transform_steps, total)
            # Slice a range of data and convert it to a list of dicts for the writer.
            data_samples = pd_dataset.iloc[start:end].to_dict('records')
            writer.write_raw_data(data_samples, parallel_writer=parallel_writer)
            pbar.update(end - start)

    print("Transformed {} records.".format(total))
    # All data written; commit the MindRecord files.
    writer.commit()
    # Record the sample count next to the output for downstream consumers.
    with open(output_data_file_path[:-len(suffix)] + ".info", 'w', encoding='utf8') as count_file:
        count_file.write(str(total))
    print(f"Transform finished, output files refer: {output_data_file_path}")

if __name__ == "__main__":
    work_path = os.path.dirname(os.path.abspath(__file__))
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model',
        default='qwen2_5',
        type=str,
        required=True,
        help='model name')
    parser.add_argument(
        '--input_file_path_or_dir',
        default=None,
        type=str,
        required=True,
        help='json file path')
    parser.add_argument(
        '--output_dir',
        default=None,
        type=str,
        required=True,
        help='mindrecord file path')
    parser.add_argument(
        '--vocab_dir',
        default='model/Qwen25/Qwen14_base/',
        type=str,
        required=True,
        help='vocab file path')
    parser.add_argument(
        '--seq_length',
        default=8192,
        type=int,
        required=True,
        help='seq length')
    parser.add_argument(
        '--num_proc',
        default=64,
        type=int,
        required=False,
        help='mul process num')
    parser.add_argument(
        '--file_partition',
        default=1,
        type=int,
        help='how many split files would be saved')
    args_, rest_args_ = parser.parse_known_args()

    # Build the tokenizer. NOTE: the original code read non-existent attributes
    # (args_.vocab_file / args_.ori_file_path / args_.output_file_path), which raised
    # AttributeError — the parser actually defines --vocab_dir, --input_file_path_or_dir
    # and --output_dir; the correct attributes are used below.
    # --vocab_dir is a single vocab file for llama3/glm12b, and a directory holding
    # vocab.json + merges.txt for qwen2_5.
    if args_.model == "llama3":
        from research.llama3.llama3_tokenizer import Llama3Tokenizer
        word_tokenizer = Llama3Tokenizer(vocab_file=args_.vocab_dir, add_bos_token=True)
    elif args_.model == "glm12b":
        from research.glm12b.glm_tokenizer import GLMTokenizer
        word_tokenizer = GLMTokenizer(vocab_file=args_.vocab_dir)
    elif args_.model == "qwen2_5":
        from research.qwen2_5.qwen2_5_tokenizer import Qwen2Tokenizer
        merges_file = os.path.join(args_.vocab_dir, 'merges.txt')
        vocab_file = os.path.join(args_.vocab_dir, 'vocab.json')
        print('vocab_file: ', vocab_file)
        print('merges_file: ', merges_file)
        word_tokenizer = Qwen2Tokenizer(vocab_file, merges_file, add_bos_token=False, add_eos_token=False)
    else:
        # Original read args_.mode (typo); the attribute is args_.model.
        raise ValueError(
            f'{args_.model} is not currently supported, the list of supported models is [llama3, glm12b, qwen2_5]')

    # Derive the output file name from the input path.
    input_path = args_.input_file_path_or_dir
    if os.path.isdir(input_path):
        input_path = input_path.rstrip('/')
        mindrecord_file_name = os.path.basename(input_path) + '.mindrecord'
    else:
        # `assert` replaced with an explicit error: asserts are stripped under `python -O`.
        if not input_path.endswith('.jsonl'):
            raise ValueError(f'expected a .jsonl input file, got: {input_path}')
        mindrecord_file_name = os.path.basename(input_path)[:-len('.jsonl')] + '.mindrecord'

    output_file_path = os.path.join(args_.output_dir, mindrecord_file_name)
    if os.path.exists(output_file_path):
        raise ValueError('mindrecord file already exists')
    os.makedirs(args_.output_dir, exist_ok=True)

    pretrain_dataset_process(model=args_.model,
                             ori_data_file_path=input_path,
                             output_data_file_path=output_file_path,
                             tokenizer=word_tokenizer,
                             # +1: presumably so each window yields seq_length input/label
                             # pairs after the shift — TODO confirm with the training code.
                             seq_length=int(args_.seq_length + 1),
                             # Honor the CLI options instead of the hard-coded 64 / 1
                             # that silently ignored --num_proc and --file_partition.
                             num_proc=args_.num_proc,
                             file_partition=args_.file_partition,
                             parallel_writer=False)