import json
import numpy as np
from torch.utils.data import Dataset

class ChatGLM1Dataset(Dataset):
    """Single-turn instruction-tuning dataset for ChatGLM-style models.

    Each non-blank line of ``file`` must be a JSON object with
    ``"instruction"`` and ``"output"`` keys.  A sample is packed as
    ``input1 </s> target1 </s> ...`` and a ``target_mask`` marks which
    positions should contribute to the loss (target tokens only).
    """

    def __init__(self, file, tokenizer, max_seq_length):
        """
        Args:
            file: path to a JSON-lines file (UTF-8).
            tokenizer: HF-style tokenizer; must expose ``bos_token_id`` and
                ``eos_token_id`` and be callable on a list of strings.
            max_seq_length: hard cap on the packed sequence length.
        """
        self.tokenizer = tokenizer
        # bos is kept for interface compatibility but is not used when
        # packing (see __getitem__ — sequences are not prefixed with <s>).
        self.bos_token_id = tokenizer.bos_token_id
        self.eos_token_id = tokenizer.eos_token_id
        self.max_seq_length = max_seq_length

        # Skip blank lines so a trailing newline (or an accidental empty
        # line) does not crash json.loads() later in __getitem__.
        with open(file, 'r', encoding='utf8') as f:
            self.data_list = [line for line in f if line.strip()]

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, index):
        # Packed layout: <s>input1</s>target1</s>input2</s>target2</s>...
        data = json.loads(self.data_list[index])

        # Collect the conversation turns (here a single instruction/output
        # pair) and tokenize them all in one call.
        utterances = [data["instruction"], data["output"]]
        utterances_ids = self.tokenizer(utterances, add_special_tokens=False).input_ids

        input_ids = []
        target_mask = []  # masks the input part so loss covers targets only
        for i, utterance_ids in enumerate(utterances_ids):
            input_ids += utterance_ids
            if i % 2 == 0:
                # Even index -> input (instruction): no loss.
                target_mask += [0] * len(utterance_ids)
            else:
                # Odd index -> target (output): append </s> and take the
                # loss over the target tokens plus the terminating eos.
                input_ids += [self.eos_token_id]
                target_mask += [1] * (len(utterance_ids) + 1)
        assert len(input_ids) == len(target_mask)

        # Truncate to the cap (may cut a target mid-way; acceptable for
        # training purposes).
        input_ids = input_ids[:self.max_seq_length]
        target_mask = target_mask[:self.max_seq_length]

        attention_mask = [1] * len(input_ids)
        assert len(input_ids) == len(target_mask) == len(attention_mask)
        return {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'target_mask': target_mask
        }


from modelscope import AutoTokenizer, BitsAndBytesConfig

# Local ChatGLM3-6B checkpoint downloaded through ModelScope.
checkpoint_path = "C:\\Users\\16014\\.cache\\modelscope\\hub\\models\\ZhipuAI\\chatglm3-6b"

if __name__ == '__main__':
    # Smoke test: build the dataset from the dev split and fetch one sample.
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_path, trust_remote_code=True)
    dataset = ChatGLM1Dataset("../data/dev.jsonl", tokenizer, max_seq_length=180)
    # Idiomatic indexing — equivalent to dataset.__getitem__(1).
    dataset[1]
