# Copyright 2024 CHINA MERCHANTS BANK CO., LTD.
# Copyright 2024 Huawei Technologies Co., Ltd
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================


import pickle
import os
import argparse
from platform import system
from typing import Dict, List, Tuple
import numpy as np
from functools import partial
from datasets import load_dataset, Dataset
from mindspore.mindrecord import FileWriter


class QWENPreprocessor:
    """Turn tokenized chat samples into fixed-length (tokens, targets) pairs.

    Sequences are LEFT-padded to ``seq_length + 1`` entries: tokens with the
    tokenizer's pad id, targets with -100 so padded positions are ignored by
    the loss. Samples whose trainable (masked-in) tokens do not survive
    truncation are dropped.
    """

    def __init__(self, tokenizer, seq_length: int):
        self.seq_length = seq_length
        self.eos_id = tokenizer.eos_token_id
        self.pad_id = tokenizer.pad_token_id

    def _pack_samples(self, sequences: List[Tuple[np.ndarray, np.ndarray, int]]):
        """Concatenate per-sample (tokens, targets, original_length) triples.

        The third element (original text length) is carried for bookkeeping
        and is not part of the packed output.
        """
        tokens, targets, _lengths = zip(*sequences)
        tokens = np.concatenate(tokens, axis=-1)
        targets = np.concatenate(targets, axis=-1)
        return tokens, targets

    def pad_batch(self, tokens: np.ndarray, targets: np.ndarray, loss_masks: np.ndarray, max_seq_length: int):
        """Left-pad all three arrays to length ``max_seq_length + 1``.

        The extra slot exists because causal-LM training shifts inputs and
        labels by one position.
        """
        assert len(tokens) <= max_seq_length

        tokens = np.concatenate(([self.pad_id] * (max_seq_length + 1 - len(tokens)), tokens))
        targets = np.concatenate(([-100] * (max_seq_length + 1 - len(targets)), targets))
        loss_masks = np.concatenate(([0] * (max_seq_length + 1 - len(loss_masks)), loss_masks))
        return tokens, targets, loss_masks

    def _get_single_multitask_chat_data(self, text: np.ndarray, loss_mask: np.ndarray, max_seq_length: int):
        """Build (tokens, targets, too_long) for one sample.

        Targets keep the token id where loss_mask == 1 and are -100 elsewhere.
        ``too_long`` is 1 when no trainable token survives truncation.
        """
        tokens = np.array(text)
        loss_mask = np.array(loss_mask)
        targets = loss_mask * text + (1 - loss_mask) * (-100)
        if len(tokens) > max_seq_length:
            tokens = tokens[: max_seq_length]
            targets = targets[: max_seq_length]
            # Bug fix: truncate the mask alongside tokens/targets. The
            # original passed the full-length mask, so pad_batch padded with
            # an empty list and a sample whose assistant tokens all fell
            # beyond the cut slipped through with all -100 labels.
            loss_mask = loss_mask[: max_seq_length]
        tokens, targets, loss_masks = self.pad_batch(tokens, targets, loss_mask, max_seq_length=max_seq_length)
        too_long = 1 if np.sum(loss_masks) == 0 else 0
        return tokens, targets, too_long

    def get_greedily_aggregated_multitask_chat_data(self, texts: List[np.ndarray], loss_masks: List[np.ndarray]):
        """Process a list of samples and pack the survivors; None if all dropped."""
        sequences = []
        for idx, (text, loss_mask) in enumerate(zip(texts, loss_masks)):
            tks, tgts, too_long = self._get_single_multitask_chat_data(text, loss_mask, max_seq_length=self.seq_length)
            if too_long:
                print(f'sample {idx} dropped: no trainable tokens within seq_length')
                continue
            sequences.append((tks, tgts, len(text)))
        if not sequences:
            return None

        return self._pack_samples(sequences)


def write_to_mindrecord(dataset, tokenizer, args_param):
    """Serialize tokenized chat samples into a MindRecord file.

    Args:
        dataset: sequence of dicts each carrying 'input_ids' and 'loss_mask'.
        tokenizer: tokenizer exposing ``eos_token_id`` / ``pad_token_id``.
        args_param: parsed CLI args (seq_length, sharegpt_file_path, output_dir).

    Raises:
        ValueError: if the target mindrecord file already exists.
    """
    processor = QWENPreprocessor(tokenizer, args_param.seq_length)
    schema = {'input_ids': {"type": "int32", "shape": [-1]},
              'labels': {"type": "int32", "shape": [-1]}}

    # Derive the output name from the input path: directory base name, or the
    # jsonl file name with its '.jsonl' suffix replaced.
    if os.path.isdir(args_param.sharegpt_file_path):
        if args_param.sharegpt_file_path.endswith('/'):
            args_param.sharegpt_file_path = args_param.sharegpt_file_path[:-1]
        mindrecord_file_name = args_param.sharegpt_file_path.split('/')[-1] + '.mindrecord'
    else:
        assert args_param.sharegpt_file_path.endswith('.jsonl')
        mindrecord_file_name = args_param.sharegpt_file_path.split('/')[-1][:-6] + '.mindrecord'
    output_file = os.path.join(args_param.output_dir, mindrecord_file_name)
    if os.path.exists(output_file):
        raise ValueError('mindrecord file already exists')
    os.makedirs(args_param.output_dir, exist_ok=True)

    # FileWriter is imported at module level.
    writer = FileWriter(file_name=output_file, shard_num=1)
    writer.add_schema(schema, "lmdb_chat")

    def write_to_record(items):
        # items: list of (input_ids, loss_mask) pairs; None means every
        # sample in the group was dropped by the preprocessor.
        data = processor.get_greedily_aggregated_multitask_chat_data(*zip(*items))
        if not data:
            return
        sample = {
            'input_ids': np.array(data[0], dtype=np.int32),
            'labels': np.array(data[1], dtype=np.int32)
        }
        writer.write_raw_data([sample])

    cnt = 0
    # One record per source sample (no cross-sample packing here).
    for row in dataset:
        write_to_record([(row['input_ids'], row['loss_mask'])])
        cnt += 1
    writer.commit()
    print(f"Transformed {cnt} samples.")


def add_single_message(tokenized_dict, role, content, tokenizer, loss=False):
    """Append one ChatML-formatted message to the running token dict.

    The message is wrapped as ``<|im_start|>{role}\\n{content}<|im_end|>\\n``
    and tokenized; ``loss_mask`` receives 1s when ``loss`` is True (assistant
    turns) and 0s otherwise.
    """
    assert role in ["system", "user", "assistant"], role

    message = f"<|im_start|>{role}\n{content}<|im_end|>\n"
    token_ids = tokenizer(message)["input_ids"]
    mask_value = 1 if loss else 0

    tokenized_dict["input_ids"].extend(token_ids)
    tokenized_dict["loss_mask"].extend([mask_value] * len(token_ids))
    return tokenized_dict


def process_sft_batch(data_point, tokenizer):
    """Tokenize one batch of ShareGPT-style conversations.

    data_point: {"id": [id1, id2, ...],
                 "conversations": [[messages1], [messages2], ...],
                 "system": optional list of system prompts, positionally
                           aligned with "conversations"}
    Returns {"samples": [{"input_ids": [...], "loss_mask": [...]}, ...]}.
    Only assistant turns contribute to the loss mask.
    """
    conversations_all = data_point["conversations"]
    ids = data_point["id"]
    systems = None
    if "system" in data_point:
        systems = data_point["system"]

    samples = []
    for pos, (_order, messages) in enumerate(zip(ids, conversations_all)):
        result = {"input_ids": [], "loss_mask": []}
        if systems:
            # Bug fix: index the batch positionally. The original used
            # systems[order] (the sample's global "id"), which misaligns or
            # raises IndexError for every batch after the first under
            # datasets.map(batched=True).
            result = add_single_message(result, "system", systems[pos], tokenizer, loss=False)
        for conv in messages:
            if conv["role"] == "user":
                # user turn: no loss
                result = add_single_message(result, conv["role"], conv["content"], tokenizer, loss=False)
            elif conv["role"] == "assistant":
                # assistant turn: contributes to the loss
                result = add_single_message(result, conv["role"], conv["content"], tokenizer, loss=True)

        samples.append(result)
    return {"samples": samples}


def finetune_dataset_process(sharegpt_file_path, tokenizer, num_proc=100, file_partition=1, parallel_writer=False):
    """Load jsonl chat data and tokenize it with ``process_sft_batch``.

    ``sharegpt_file_path`` may be a single file or a directory, which is
    searched recursively for files whose names end in 'jsonl'.
    (``file_partition`` and ``parallel_writer`` are accepted for interface
    compatibility and unused here.)
    """
    if os.path.isdir(sharegpt_file_path):
        json_list = [
            os.path.join(root, file_name)
            for root, _dirs, file_names in os.walk(sharegpt_file_path)
            for file_name in file_names
            if os.path.join(root, file_name).endswith('jsonl')
        ]
    else:
        json_list = [sharegpt_file_path]
    print(json_list, flush=True)
    datasets_example = load_dataset("json", data_files=json_list, split="train")
    tokenized_dataset_example = datasets_example.map(
        partial(process_sft_batch, tokenizer=tokenizer),
        batched=True,
        num_proc=num_proc,
        remove_columns=datasets_example.column_names,
    )
    return tokenized_dataset_example


def main(args_param):
    """End-to-end driver: build the Qwen2 tokenizer, tokenize the dataset,
    and write the result to MindRecord.

    Expects ``args_param.tokenizer_dir`` to contain 'vocab.json' and
    'merges.txt'. (Dead commented-out torch code removed.)
    """
    tokenizer_dir = args_param.tokenizer_dir
    sharegpt_file_path = args_param.sharegpt_file_path
    # Project-local import, deferred so the module can be inspected without
    # the research package on the path.
    from research.qwen2_5.qwen2_5_tokenizer import Qwen2Tokenizer
    merges_file = os.path.join(tokenizer_dir, 'merges.txt')
    vocab_file = os.path.join(tokenizer_dir, 'vocab.json')

    tokenizer_dict = dict(
        vocab_file=vocab_file,
        merges_file=merges_file,
        add_bos_token=False,
        add_eos_token=False,
        model_max_length=32768,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        eos_token="<|im_end|>",
    )

    print('vocab_file: ', vocab_file)
    print('merges_file: ', merges_file)
    word_tokenizer = Qwen2Tokenizer(**tokenizer_dict)
    samples = finetune_dataset_process(sharegpt_file_path, word_tokenizer, num_proc=args_param.num_proc, file_partition=1, parallel_writer=False)
    write_to_mindrecord(samples['samples'], word_tokenizer, args_param)


if __name__ == "__main__":
    # CLI entry point: convert a ShareGPT-style jsonl dataset to MindRecord.
    arg_parser = argparse.ArgumentParser()
    for flag, flag_type, flag_default in (
        ("--seq_length", int, 4096),
        ("--num_proc", int, 16),
        ("--tokenizer_dir", str, "/path/qwen25_7b"),
        ("--sharegpt_file_path", str, "/path/dataset_QA4.json"),
        ("--output_dir", str, "/path/outputs/"),
    ):
        arg_parser.add_argument(flag, type=flag_type, default=flag_default)
    args = arg_parser.parse_args()
    print(args)
    main(args)

    # Example jsonl dataset content (one JSON object per line); the "system" field is optional:
    '''
    {"id": 0, "system": "你是一个AI助手", "conversations": [{"role": "user", "content":"请参考文档中的信息，找出能够回答问题的最相关的答案。\n文档：\n摘要：解冻的问题是ICE吸收的能量比水少。这导致冷冻＞物品首先在一个点熔化；解决方案是慢慢加热并给予加热时间以融化整个冷冻物品，然后开始煮一小块。除霜只是使用较低的平均功率来为熔化的部件提供时间来加热附近的冷冻部件。它可以通过两种方式实现：1）低功率或2）脉冲电源接通然后关闭。除霜使用的功率远低于\"高\"循环。这有助于保持食物的纤维质量，使其不太可能变硬。此外，如果有计划腌肉，慢慢解冻的肉将浸泡腌料，坚韧的肉不会产生你想要的味道。;\n\n问题：将＞微波炉设置为除霜模式而不是常规加热模式时，微波炉有什么不同？\n要求：(1）答案需准确且出自文档材料。\n(2）如果文档没有问题的相关答案，请回复：文档缺乏相关信息，无法根据文档信息回答问题。\n答案："}, {"role": "assistant", "content": "当微波炉设置为除霜模式而不是常规加热模式时，它会使用较低的平均功率来为熔化的部分提供时间来加热附近的冷冻部分。这可以通过两种方式实现：1）低功率或2）脉冲电源＞接通然后关闭。除霜模式使用的功率远低于\"高\"循环，这有助于保持食物的纤维质量，使其不太可能变硬。文档中提到，这有助于食物解冻更加均匀，避免部分食物过热而部分食物仍处于冷冻状态。"}]}
    {"id": 1, "system": "你是一个AI助手", "conversations": [{"role": "user", "content":"请参考文档中的信息，找出能够回答问题的最相关的答案。\n文档：\n摘要：解冻的问题是ICE吸收的能量比水少。这导致＞冷冻＞物品首先在一个点熔化；解决方案是慢慢加热并给予加热时间以融化整个冷冻物品，然后开始煮一小块。除霜只是使用较低的平均功率来为熔化的部件提供时间来加热附近的冷冻部件。它可以通过两种方式实现：1)＞低功率或2）脉冲电源接通然后关闭。除霜使用的功率远低于\"高\"循环。这有助于保持食物的纤维质量，使其不太可能变硬。此外，如果有计划腌肉，慢慢解冻的肉将浸泡腌料，坚韧的肉不会产生你想要的味道。;\n\n问＞题：将＞微波炉设置为除霜模式而不是常规加热模式时，微波炉有什么不同？\n要求：(1）答案需准确且出自文档材料。\n(2）如果文档没有问题的相关答案，请回复：文档缺乏相关信息，无法根据文档信息回答问题。\n答案："),{"role": "assistant", "content": "当微波炉设置为除霜模式而不是常规加热模式时，它会使用较低的平均功率来为熔化的部分提供时间来加热附近的冷冻部分。这可以通过两种方式实现：1）低功率或2）脉
冲电源＞接通然后关闭。除霜模式使用的功率远低于\"高\"循环，这有助于保持食物的纤维质量，使其不太可能变硬。文档中提到，这有助于食物解冻更加均匀，避免部分食物过热而部分食物仍处于冷冻状态。"}]}
    '''


