from geesibling.adapters.pytorch.pipeline.models.model_gpt2 import GPT2LMHeadModel,GPT2ForSequenceClassification
from transformers import GPT2Config,AutoConfig,AutoTokenizer, PreTrainedTokenizer
from transformers.models.gpt2.modeling_gpt2 import GPT2Config
from datasets import load_dataset
from transformers import GPT2Tokenizer
from torch.utils.data import DataLoader

# GPT-2 ships without a pad token, so fall back to EOS for padding.
tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained('gpt2', use_fast=True)
if not getattr(tokenizer, "pad_token", None):
    # BUG FIX: use the public `eos_token` property, not the private
    # `_eos_token` attribute (which may hold an AddedToken object or not
    # exist at all depending on the tokenizer class/version).
    tokenizer.pad_token = tokenizer.eos_token

# Classification config (2 labels). Recording the pad token lets the model
# locate the last non-pad position when pooling for classification.
cfg = AutoConfig.from_pretrained(
        'gpt2',
        num_labels=2,
        pad_token=tokenizer.pad_token,
        pad_token_id=tokenizer.pad_token_id,
)

# Pipeline stage 0: pre_process=True / post_process=False — presumably the
# embedding layer plus the first half of the transformer blocks, on GPU 0.
# TODO confirm against GeeSibling's GPT2ForSequenceClassification.
pp_rank = 0
pre = True
post = False
pp_size = 2

model1 = GPT2ForSequenceClassification.from_pretrained('gpt2', config=cfg,pp_rank = pp_rank,pre_process = pre,post_process = post,pp_size = pp_size).to('cuda:0')

# Pipeline stage 1: pre_process=False / post_process=True — the remaining
# blocks plus the classification head, on GPU 1.
pp_rank = 1
pre = False
post = True
pp_size = 2

model2 = GPT2ForSequenceClassification.from_pretrained('gpt2', config=cfg,pp_rank = pp_rank,pre_process = pre,post_process = post,pp_size = pp_size).to('cuda:1')



def get_data_loader(tokenizer,bs,tensor_length):
    """Build a shuffled training DataLoader over the local MRPC jsonl splits.

    Args:
        tokenizer: tokenizer used to encode each sentence pair.
        bs: batch size for the returned DataLoader.
        tensor_length: fixed sequence length (every example is padded or
            truncated to this many tokens so batches stack cleanly).

    Returns:
        A DataLoader over the tokenized 'train' split yielding torch tensors
        for 'input_ids', 'attention_mask' and 'label'.
    """
    # Pre-downloaded MRPC splits on local disk.
    data_files = {
            'train':'/root/chj/gees/GeeSibling/examples/datasets/mrpc/train.jsonl',
            'test':'/root/chj/gees/GeeSibling/examples/datasets/mrpc/test.jsonl',
            'validation':'/root/chj/gees/GeeSibling/examples/datasets/mrpc/validation.jsonl'
            }

    raw = load_dataset('json', data_files=data_files)

    def encode(batch):
        # Joint encoding of the sentence pair at a fixed length.
        return tokenizer(
            batch['sentence1'],
            batch['sentence2'],
            truncation=True,
            padding="max_length",
            max_length=tensor_length,
        )

    tokenized = raw.map(encode, batched=True)
    # Expose only the columns the training loop consumes, as torch tensors.
    tokenized.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])

    loader = DataLoader(tokenized['train'], batch_size=bs, shuffle=True)
    print('data done')
    return loader



train_dataloader = get_data_loader(tokenizer,8,128)

import torch
import torch.optim as optim

# BUG FIX: the optimizer previously covered only model2's parameters, so the
# first pipeline stage (model1) received gradients but was never updated.
# One optimizer over both stages keeps the whole pipeline learning.
optimizer = optim.Adam(
    list(model1.parameters()) + list(model2.parameters()), lr=5e-5
)

model1.train()
model2.train()

for epoch in range(1):  # single debug epoch (original comment claimed 3)
    for batch in train_dataloader:
        optimizer.zero_grad()

        # Stage-0 input goes to GPU 0; labels are consumed by stage 1 on GPU 1.
        input_ids = batch['input_ids'].to(0)
        labels = batch['label'].to(1)

        # Forward through both stages. The activation hop `.to(1)` is
        # differentiable, so backward() reaches model1's parameters too.
        outputs1 = model1(input_ids=input_ids)
        output_tensor = model2(inputs_embeds = outputs1.to(1),labels = labels)
        loss = output_tensor.loss

        # Intermediate grads are freed by default; retain them for inspection.
        if output_tensor.logits.requires_grad:
            print('true')
            output_tensor.logits.retain_grad()
            outputs1.retain_grad()

        loss.backward()
        print(output_tensor.logits.grad.shape)
        print(outputs1.grad.shape)

        optimizer.step()

        # BUG FIX: this print used to sit after `break` and was unreachable.
        print(f"Epoch {epoch + 1}, Loss: {loss.item()}")
        # NOTE(review): debug stop after one batch — remove to train fully.
        break