# Reproducibility: fix the RNG seed used by transformers' generation/sampling utilities.
from transformers import pipeline, set_seed  # NOTE(review): `pipeline` is imported but never used in this file
set_seed(32)
# NOTE(review): `model_folder` is assigned but never referenced below — confirm it can be removed.
model_folder = '/root/.cache/modelscope/hub/Xorbits/opt-125m'

import os

import torch
import torch.nn as nn
import bitsandbytes as bnb
from transformers import GPT2Tokenizer, AutoConfig, OPTForCausalLM

# Candidate checkpoint locations. The original code assigned model_id twice,
# leaving a dead assignment that was immediately overwritten; only the
# effective value is kept, alternatives preserved as comments.
# model_id = "./model/opt-6.7b"                               # larger OPT variant
# model_id = '/root/.cache/modelscope/hub/Xorbits/opt-125m'   # Linux ModelScope cache
# Effective value: local ModelScope cache of OPT-125m on this Windows machine.
model_id = r'C:\Users\COLORFUL\.cache\modelscope\hub\Xorbits\opt-125m'

from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed  # NOTE(review): set_seed is already imported at the top; AutoTokenizer is unused
# Load the base model with 8-bit quantization and automatic device placement.
# NOTE(review): load_in_8bit=True requires bitsandbytes and a CUDA GPU — confirm this works on the Windows box.
model = AutoModelForCausalLM.from_pretrained(model_id,load_in_8bit=True,device_map="auto")
# model = AutoModelForCausalLM.from_pretrained(model_id,device_map="auto")
tokenizer = GPT2Tokenizer.from_pretrained(model_id)  # OPT ships a GPT2-style BPE tokenizer
print(model)


# Report the memory occupied by the model's parameters and buffers
# (the gap vs. nvidia-smi is memory reserved by PyTorch's allocator).
memory_footprint_bytes = model.get_memory_footprint()
# Bug fix: the old variable was named *_mib while the divisor (1024 ** 3)
# actually converts bytes to gibibytes, and the printed unit says GB.
memory_footprint_gb = memory_footprint_bytes / (1024 ** 3)  # bytes -> GiB

print(f"{memory_footprint_gb:.2f}GB")

from peft import LoraConfig, get_peft_model

# LoRA (Low-Rank Adaptation) configuration.
config = LoraConfig(
    r=4,  # rank of the low-rank update matrices
    lora_alpha=8,  # scaling factor applied to the LoRA update
    # Modules to wrap with LoRA adapters: the attention projections plus the
    # feed-forward layers. Bug fix: OPT decoder layers name their feed-forward
    # layers "fc1"/"fc2" — the previous "fc_in"/"fc_out" (GPT-J naming)
    # matched no OPT module, so the FF layers silently received no adapters.
    target_modules = ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"],
    lora_dropout=0.05,  # dropout applied inside the LoRA modules
    bias="none",  # do not train any bias terms
    task_type="CAUSAL_LM"  # causal (autoregressive) language modeling
)

# Wrap the base model so only the LoRA adapter weights are trainable.
model = get_peft_model(model, config)

# Print the count of trainable vs. total parameters.
model.print_trainable_parameters()



from datasets import load_dataset

# Quotes corpus in JSON-Lines form, read from a local file.
quotes_path = "C:\\Users\\COLORFUL\\Desktop\\quotes.jsonl"
dataset = load_dataset('json', data_files=quotes_path)

from datasets import ClassLabel, Sequence
import random
import pandas as pd
# from IPython.display import display, HTML

def show_random_elements(dataset, num_examples=10):
    """Sample `num_examples` distinct rows from `dataset` into a DataFrame.

    ClassLabel and Sequence[ClassLabel] columns are decoded from integer ids
    to their human-readable names. The HTML display call is currently
    disabled, so the function only builds the frame and returns None.
    """
    # Raise instead of assert: assert statements are stripped under `python -O`.
    if num_examples > len(dataset):
        raise ValueError("Can't pick more elements than there are in the dataset.")
    # random.sample draws distinct indices directly — replaces the original
    # rejection-sampling while-loop (which was O(n^2) for large samples).
    picks = random.sample(range(len(dataset)), num_examples)

    df = pd.DataFrame(dataset[picks])
    for column, typ in dataset.features.items():
        if isinstance(typ, ClassLabel):
            # Map integer class ids to their string names.
            df[column] = df[column].transform(lambda i: typ.names[i])
        elif isinstance(typ, Sequence) and isinstance(typ.feature, ClassLabel):
            # Same mapping, applied element-wise to each list of ids.
            df[column] = df[column].transform(lambda x: [typ.feature.names[i] for i in x])
    # display(HTML(df.to_html()))
    
# Preview a few random training rows (rendering is disabled inside the helper).
show_random_elements(dataset["train"])
# Tokenize the "quote" field; batched=True lets the tokenizer process many rows per call.
tokenized_dataset = dataset.map(lambda samples: tokenizer(samples["quote"]), batched=True)
print(tokenized_dataset)

from transformers import DataCollatorForLanguageModeling

# Collator for language-model batches: pads inputs and builds labels.
# mlm=False selects the causal (next-token) objective rather than masked LM.
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)


from transformers import TrainingArguments, Trainer
# Restrict which GPUs are visible to this process (currently disabled).
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # NOTE(review): "0" exposes only cuda:0, though the original comment mentioned two GPUs
model_dir = "models"
model_id='opt-125M'  # NOTE(review): rebinds the checkpoint path set earlier; from here on it is only an output-naming label
training_args = TrainingArguments(
        output_dir=f"{model_dir}/{model_id}-lora",  # where checkpoints and logs are written
        per_device_train_batch_size=4,  # training batch size per device
        learning_rate=2e-4,  # initial learning rate
        fp16=True,  # mixed-precision training; faster and lighter on memory (requires a CUDA GPU)
        logging_steps=50,  # log training progress every 50 steps
        # max_steps=100, # cap on total training steps (disabled)
        num_train_epochs=1,  # total number of training epochs
        save_steps = 100,  # checkpoint every 100 steps
        # num_train_epochs=2
    )
trainer = Trainer(
    model=model,  # the PEFT-wrapped model to fine-tune
    train_dataset=tokenized_dataset["train"],  # training split
    eval_dataset=tokenized_dataset["train"],  # NOTE(review): evaluation reuses the training split — metrics won't reflect generalization; supply a held-out split
    args=training_args,
    data_collator=data_collator,
)
trainer.train()
