from datetime import datetime
from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy
# from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy

# from geesibling.adapters.pytorch.pipeline.models.model_llama import LlamaForCausalLM
from transformers import LlamaForCausalLM

import time
# The earliest we can measure the start time.
_TRAIN_START_TIME = time.time()


import torch

from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from torch.autograd import Variable
import time
from torch.nn import CrossEntropyLoss
import pandas as pd

from transformers import LlamaTokenizer

import time
def get_data_loader(tokenizer, bs, tensor_length, data_path='/root/chj/rmo.txt', repeats=30):
    """Build a DataLoader of fixed-length token chunks for causal LM training.

    Reads the UTF-8 text file at ``data_path``, repeats the text ``repeats``
    times to enlarge the corpus, tokenizes it, and cuts the token stream into
    non-overlapping chunks of ``tensor_length`` tokens (the trailing partial
    chunk is dropped). Labels are identical to the inputs, as is standard for
    causal language modeling (the model shifts them internally).

    Args:
        tokenizer: object exposing ``encode(str) -> list[int]``.
        bs: batch size of the returned DataLoader.
        tensor_length: number of tokens per training example.
        data_path: path of the raw text file (default keeps the original
            hard-coded path, so existing callers are unaffected).
        repeats: corpus repetition factor (default keeps the original 30;
            the old comment claimed 100, which was wrong).

    Returns:
        DataLoader yielding dicts ``{'input_ids': Tensor, 'labels': Tensor}``
        of shape ``(bs, tensor_length)``, unshuffled.
    """
    with open(data_path, 'r', encoding='utf-8') as file:
        dataset = file.read()
    # Repeat the raw text to enlarge the training corpus.
    dataset = dataset * repeats

    indexed_text = tokenizer.encode(dataset)
    del dataset  # free the large raw string before building the chunks

    # Non-overlapping chunks of tensor_length tokens; remainder is discarded.
    dataset_cut = [
        indexed_text[i * tensor_length:(i + 1) * tensor_length]
        for i in range(len(indexed_text) // tensor_length)
    ]
    del indexed_text

    dataset_tensor = torch.tensor(dataset_cut, dtype=torch.long)

    # Labels equal the samples (causal LM objective).
    train_set = TensorDataset(dataset_tensor, dataset_tensor)

    def collate_fn(batch):
        # Stack per-sample tensors into the batch dict HF models accept.
        ids = torch.stack([item[0] for item in batch])
        labels = torch.stack([item[1] for item in batch])
        return {'input_ids': ids, 'labels': labels}

    train_loader = DataLoader(dataset=train_set, batch_size=bs, shuffle=False, collate_fn=collate_fn)
    print(len(train_loader))
    return train_loader

def train_model(model, train_loader, optimizer, device, epochs):
    """Run a plain training loop over ``train_loader`` for ``epochs`` epochs.

    Each batch's ``input_ids`` and ``labels`` are moved to ``device``, the
    model computes its own loss, and one optimizer step is taken per batch.
    Progress and the per-batch loss are printed.
    """
    model.train()
    print('start train')
    total_batches = len(train_loader)
    for epoch in range(epochs):
        for step, batch in enumerate(train_loader):
            print(f"{step} / {total_batches}")
            input_ids = batch['input_ids'].to(device)
            target = batch['labels'].to(device)

            optimizer.zero_grad()
            loss = model(input_ids=input_ids, labels=target).loss
            loss.backward()
            optimizer.step()

            print(f"Epoch: {epoch + 1}, Loss: {loss.item()}")


if __name__ == "__main__":

    tokenizer = LlamaTokenizer.from_pretrained('/root/.cache/huggingface/hub/Llama-2-13b-hf')
    # if tokenizer.pad_token is None:
    #     tokenizer.add_special_tokens({'pad_token': '[PAD]'})

    model = LlamaForCausalLM.from_pretrained(pretrained_model_name_or_path = '/root/.cache/huggingface/hub/Llama-2-13b-hf',device_map = 'auto')
    model.train()
    print(model.hf_device_map)
    # bs_list = [1,2, 4, 8, 16, 32]
    # len_list = [512, 1024]
    # for bs in bs_list:
    #     for tensor_length in len_list:
    bs = 1
    tensor_length = 512
    epochs = 1
    train_loader = get_data_loader(tokenizer, bs, tensor_length)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
    start = time.time()
    train_model(model, train_loader, optimizer, device='cuda', epochs=epochs)
    print(f'train on bs:{bs},len:{len},time:{time.time() - start}')
                
            
            
    
