# coding: utf-8

import torch
from torch.utils.data import Dataset
import json
from transformers import AutoTokenizer

class BuildDataset(Dataset):
    """Tokenize a JSONL corpus into fixed-length blocks for causal-LM training.

    Each line of the input file is expected to be a JSON object with a
    ``"text"`` field. Every text is tokenized, then padded (with EOS) or
    truncated to exactly ``block_size + 1`` tokens, so that ``__getitem__``
    can return the standard shifted pair ``(tokens[:-1], tokens[1:])``.
    """

    def __init__(
        self,
        path,
        block_size=512,
        tokenizer_path="/usr/local/data/teamwork/llms_zero/gpt2_tokenizer",
        max_lines=1000,
    ):
        """Build the dataset.

        Args:
            path: Path to a JSONL file; each line is a JSON object with a
                ``"text"`` field.
            block_size: Sequence length of each training example; stored
                chunks are ``block_size + 1`` tokens so x/y shifting yields
                ``block_size`` tokens each.
            tokenizer_path: Local directory for the tokenizer (loaded via
                ``transformers.AutoTokenizer``). Defaults to the original
                hard-coded location.
            max_lines: Maximum number of JSONL records to read.
        """
        super().__init__()

        # Load the tokenizer from a local directory via transformers.
        self.enc = AutoTokenizer.from_pretrained(tokenizer_path, trust_remote_code=True)

        self.block_size = block_size  # maximum position / sequence length

        # GPT-2 style tokenizers ship without a pad token; reuse EOS for padding.
        if self.enc.pad_token_id is None:
            self.enc.pad_token_id = self.enc.eos_token_id
        self.eos_token = self.enc.eos_token_id

        self.max_lines = max_lines
        raw_data = []
        with open(path, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                # Stop BEFORE parsing: the original parsed one line past the
                # cap, and crashed on malformed JSON (loads was outside try).
                if i >= self.max_lines:
                    break
                try:
                    record = json.loads(line.strip())
                    raw_data.append(record["text"])
                except (json.JSONDecodeError, KeyError):
                    # Skip malformed lines and records without a "text" field.
                    continue

        # Normalize every example to exactly block_size + 1 tokens.
        self.encoded_data = []
        target_len = self.block_size + 1  # +1 so the x/y shift keeps block_size tokens
        for text in raw_data:
            encoded_text = self.enc.encode(
                text,
                add_special_tokens=True,
                truncation=True,  # never exceed the tokenizer's own limit
                return_tensors="pt",
                max_length=self.enc.model_max_length,
            ).squeeze()  # drop the (1, seq) batch dimension

            if len(encoded_text) < target_len:
                # Pad with EOS, matching the encoded dtype (int64 token ids).
                pad = torch.full(
                    (target_len - len(encoded_text),),
                    self.eos_token,
                    dtype=encoded_text.dtype,
                )
                encoded_text = torch.cat([encoded_text, pad], dim=0)
            elif len(encoded_text) > target_len:
                encoded_text = encoded_text[:target_len]

            self.encoded_data.append(encoded_text)

        # Shape: (num_examples, block_size + 1).
        self.encoded_data = torch.stack(self.encoded_data, dim=0)

    def __len__(self):
        """Return the number of fixed-length examples."""
        return len(self.encoded_data)

    def __getitem__(self, idx):
        """Return the shifted (input, target) pair for example ``idx``.

        The input is the first ``block_size`` tokens of the stored chunk and
        the target is the same chunk shifted left by one token.
        """
        chunk = self.encoded_data[idx]
        x = chunk[:-1]
        y = chunk[1:]
        return x, y

    def encode(self, text):
        """Encode ``text`` into a list of token ids (with special tokens)."""
        return self.enc.encode(text, add_special_tokens=True)

    def decode(self, ids):
        """Decode a sequence of token ids back into text."""
        return self.enc.decode(ids)

