from datasets import load_dataset
from torch.utils.data import DataLoader, TensorDataset
from transformers import GPT2Tokenizer
from geesibling.adapters.pytorch.pipeline.models.model_gpt2 import GPT2LMHeadModel
from transformers.models.gpt2.modeling_gpt2 import GPT2Config
# from transformers import GPT2LMHeadModel

def get_data_loader(tokenizer,
                    data_files="/root/chj/datasets/imdb/plain_text/train-00000-of-00001.parquet",
                    batch_size=2,
                    max_length=512):
    """Build a shuffled DataLoader of tokenized IMDB text for LM training.

    Args:
        tokenizer: A HuggingFace tokenizer; if it has no pad token, the EOS
            token is reused for padding.
        data_files: Path to the Parquet file(s) to load (was hard-coded).
        batch_size: Batch size for the returned DataLoader.
        max_length: Truncation/padding length for each example.

    Returns:
        torch.utils.data.DataLoader over the 'train' split, yielding dicts
        with 'input_ids', 'attention_mask' and 'labels' tensors.
    """
    # Ensure a pad token exists before tokenizing with padding='max_length'.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # Load the dataset from the Parquet file(s).
    dataset = load_dataset("parquet", data_files=data_files)

    def encode(examples):
        # Copy input_ids into labels so the model trains as a causal LM
        # (the model shifts labels internally).
        # NOTE(review): pad positions are left as-is in labels, so padding
        # contributes to the loss — consider masking with -100 upstream.
        encoding = tokenizer(examples['text'], truncation=True,
                             padding='max_length', max_length=max_length)
        encoding['labels'] = encoding['input_ids'].copy()
        return encoding

    # Tokenize in batches via datasets.map.
    encoded_dataset = dataset.map(encode, batched=True)

    # Expose only the tensor columns the model consumes.
    encoded_dataset.set_format(type='torch',
                               columns=['input_ids', 'attention_mask', 'labels'])
    return DataLoader(encoded_dataset['train'], batch_size=batch_size, shuffle=True)
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
if tokenizer.pad_token is None:
    # Reuse EOS as the pad token instead of adding a new '[PAD]' token:
    # add_special_tokens grows the tokenizer vocab to 50258 while
    # GPT2Config().vocab_size stays 50257 and the model's embeddings are
    # never resized, so pad ids would index out of bounds in the embedding.
    tokenizer.pad_token = tokenizer.eos_token

dataloader = get_data_loader(tokenizer)
train_data_iterator = iter(dataloader)

# Shrink the model to 6 transformer layers, then print the *effective* config.
cfg = GPT2Config()
cfg.n_layer = 6
print(cfg)
# First pipeline stage (rank 0 of 2): has the embedding/pre-process side,
# not the LM head/post-process side.
model = GPT2LMHeadModel(config=cfg, pp_rank=0, pre_process=True,
                        post_process=False, pp_size=2)

# Alternative: load pretrained weights with an explicit device map, e.g.
# map = {'transformer.wte': 'cuda:3', ..., 'lm_head': 'cuda:3'}
# model = GPT2LMHeadModel.from_pretrained('gpt2', config=cfg, device_map=map)

# Fetch ONE batch and reuse it: the original called next() twice, so the
# batch fed to the model was a different one than the batch printed.
batch = next(train_data_iterator)
input_ids = batch['input_ids']
print(input_ids)
print(input_ids.shape)
otpt = model(input_ids)