import os
# Restrict CUDA to GPU 0. Must happen before `import torch` below so the CUDA
# runtime only ever enumerates this one device.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import time
import math
import pickle
from contextlib import nullcontext
import numpy as np
import torch
from model import Transformer, ModelArgs
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DistributedDataParallel as DDP

from dataset import PretrainDataset
import logging

# ========
# Tokenizer for optionally decoding token ids back to text (the decode calls
# in the inspection loop below are commented out); the vocab file ships with
# the repo under ./chatglm_tokenizer/.
from chatglm_tokenizer.tokenization_chatglm import ChatGLMTokenizer

tokenizer = ChatGLMTokenizer(vocab_file='./chatglm_tokenizer/tokenizer.model')

# Pretraining corpus: flat binary token file(s), opened memory-mapped so the
# whole corpus is not loaded into RAM at once.
data_path_list = [
    './data/pretrain_data.bin',
]
train_ds = PretrainDataset(data_path_list, max_length=1024, memmap=True)




# Sanity-check loop: dump the raw token ids of every (input, target) pair.
# X is the model input sequence and Y the corresponding target sequence
# (presumably X shifted by one token — confirm against PretrainDataset).
# NOTE(review): this walks the ENTIRE dataset; interrupt manually or add a
# `break` after a few samples when eyeballing the data.
for X, Y in train_ds:  # `enumerate` dropped: the step index was never used
    print("=====")
    # print(tokenizer.decode(X.tolist()))  # uncomment to view decoded text
    print(X.tolist())
    print("----------")
    # print(tokenizer.decode(Y.tolist()))  # uncomment to view decoded text
    print(Y.tolist())
    
