from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer
from datasets import Dataset
import torch
import torch.nn as nn
import torch.onnx

# Select the training/export device: GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def read_dataset(file_path):
    """Load a UTF-8 lyrics file and return the non-empty '[SEP]'-separated segments, stripped."""
    with open(file_path, 'r', encoding='utf-8') as handle:
        raw_text = handle.read()
    return [segment.strip() for segment in raw_text.split('[SEP]') if segment.strip()]


# Load the pretrained Chinese-lyrics GPT-2 tokenizer and model from the HF hub.
tokenizer = AutoTokenizer.from_pretrained("uer/gpt2-chinese-lyric")
model = AutoModelForCausalLM.from_pretrained("uer/gpt2-chinese-lyric")

# Move the model to the selected device (GPU if available).
model = model.to(device)

# Read the training lyrics ('[SEP]'-separated records in train.txt).
file_path = 'train.txt'
lyrics = read_dataset(file_path)

# Wrap the lyric strings in a HF Dataset with a single 'text' column.
dataset = Dataset.from_dict({'text': lyrics})


# Tokenize each lyric, pad/truncate, and build next-token-prediction labels.
def tokenize_function(examples, tok=None, max_length=80):
    """Tokenize ``examples['text']`` and attach left-shifted ``labels``.

    Position t is labeled with the token at t+1, so the custom cross-entropy
    loss below can be applied to the logits without any further shifting.
    Padding targets (and the final position, which has no next token) are set
    to -100 so ``CrossEntropyLoss`` ignores them.

    Args:
        examples: batch dict with a 'text' list (HF ``Dataset.map`` format).
        tok: tokenizer to use; defaults to the module-level ``tokenizer``.
        max_length: truncation / padding length (default 80, as before).

    Returns:
        The tokenizer output dict with an added ``labels`` key.
    """
    if tok is None:
        tok = tokenizer  # fall back to the globally loaded tokenizer
    tokenized = tok(examples["text"], truncation=True, max_length=max_length, padding="max_length")
    labels = []
    for input_ids in tokenized["input_ids"]:
        # Shift left by one; the last position has no next token -> -100.
        shifted_input_ids = input_ids[1:] + [-100]
        # Mask padding targets so they do not contribute to the loss.
        shifted_input_ids = [token if token != tok.pad_token_id else -100 for token in shifted_input_ids]
        labels.append(shifted_input_ids)
    tokenized["labels"] = labels
    return tokenized


# Tokenize the whole dataset in batches (adds input_ids / labels columns).
tokenized_dataset = dataset.map(tokenize_function, batched=True)

# Training configuration: 80 epochs, per-device batch size 4, LR warmup over
# 1400 steps, weight decay 0.05; no intermediate checkpoints are saved.
training_args = TrainingArguments(
    output_dir='./results',
    num_train_epochs=80,
    per_device_train_batch_size=4,
    warmup_steps=1400,
    weight_decay=0.05,
    logging_dir='./logs',
    logging_steps=20,
    save_strategy="no",
)


class CustomTrainer(Trainer):
    """Trainer with a custom loss for the pre-shifted labels.

    ``tokenize_function`` already shifts labels left by one position, so the
    loss here is plain cross-entropy between ``logits[t]`` and ``labels[t]``
    with no extra shift (the default HF causal-LM loss would shift again).
    """

    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        labels = inputs.pop("labels")
        outputs = model(**inputs)
        logits = outputs.logits
        # The collator already delivers `labels` as a tensor on the training
        # device; the old `torch.tensor(labels)` re-wrapped it (extra copy +
        # UserWarning). `as_tensor` is a no-op for tensors, and moving to
        # logits.device is correct even when the global `device` differs
        # (e.g. under DataParallel).
        labels = torch.as_tensor(labels).to(logits.device)
        # ignore_index defaults to -100, matching the masking in
        # tokenize_function, so padded positions drop out of the loss.
        loss_fct = nn.CrossEntropyLoss()
        # logits.size(-1) instead of model.config.vocab_size: wrapped models
        # (DataParallel/DDP) do not expose `.config` directly.
        loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
        return (loss, outputs) if return_outputs else loss


# Build the trainer with the custom loss over the tokenized dataset.
trainer = CustomTrainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset
)

# Run fine-tuning.
trainer.train()

# Persist the fine-tuned model (weights + config) for later reloading.
trainer.save_model("./fine_tuned_gpt2_chinese_lyric")

# Convert the fine-tuned model to ONNX format.
model.eval()  # disable dropout etc. for a deterministic export


# Dummy input used only to trace the graph; its content is arbitrary.
input_text = "示例输入文本"
input_ids = tokenizer(input_text, return_tensors="pt").input_ids


# The example input must live on the same device as the model.
input_ids = input_ids.to(device)

# Names for the traced graph's input and output tensors.
input_names = ["input_ids"]
output_names = ["logits"]


# NOTE(review): GPT-2 also returns past_key_values when use_cache is enabled,
# so the traced graph may have more outputs than the single "logits" named
# here — confirm, or set model.config.use_cache = False before exporting.
torch.onnx.export(
    model,
    input_ids,
    "fine_tuned_gpt2_chinese_lyric5.6.2.onnx",
    export_params=True,
    opset_version=14,
    do_constant_folding=True,
    input_names=input_names,
    output_names=output_names,
    # Allow variable batch size and sequence length at inference time.
    dynamic_axes={
        "input_ids": {0: "batch_size", 1: "sequence_length"},
        "logits": {0: "batch_size", 1: "sequence_length"}
    }
)