from datasets import Dataset
import json
from transformers import Qwen2ForQuestionAnswering, Trainer, TrainingArguments
import torch
from transformers import AutoTokenizer

# 加载上传的数据集
# Load the raw ShareGPT-style conversation dataset from disk.
with open('D:/LLaMA-Factory/data/ptero_dataset.json', encoding='utf-8') as f:
    squad_data = json.load(f)

# 将数据集转换为 SQuAD 格式
def convert_to_squad_format(squad_data):
    """Flatten ShareGPT-style conversations into a SQuAD-like QA dataset.

    Each entry's ``conversations`` list alternates between ``user`` and
    ``assistant`` turns. Every user question is paired with the assistant
    reply that follows it; the assistant reply doubles as the context, so
    the answer always spans the whole context starting at offset 0.

    BUG FIX: the original appended one row per *turn*, so user turns
    produced rows with an empty answer and assistant turns produced rows
    with an empty question — question/answer were never paired.

    Args:
        squad_data: list of dicts, each with a ``conversations`` list of
            ``{"from": ..., "value": ...}`` turns.

    Returns:
        datasets.Dataset with ``context``, ``question`` and ``answers``
        columns (``answers`` holds ``{"text", "start"}`` dicts).
    """
    contexts = []
    questions = []
    answers = []

    for entry in squad_data:
        pending_question = None
        for turn in entry["conversations"]:
            if turn["from"] == "user":
                # Remember the question until we see the matching reply.
                pending_question = turn["value"]
            elif turn["from"] == "assistant" and pending_question is not None:
                answer = turn["value"]
                questions.append(pending_question)
                contexts.append(answer)  # the reply itself serves as context
                # Answer covers the entire context, so it starts at 0.
                answers.append({"text": answer, "start": 0})
                pending_question = None

    return Dataset.from_dict({
        'context': contexts,
        'question': questions,
        'answers': answers
    })

# Convert the raw conversations into a Hugging Face Dataset.
dataset = convert_to_squad_format(squad_data)

# Tokenizer and model must come from the same checkpoint directory,
# otherwise the vocabulary will not match the model embeddings.
model_path = 'D:/qwen'
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = Qwen2ForQuestionAnswering.from_pretrained(model_path)

# 数据预处理函数
def preprocess_function(examples):
    """Tokenize question/context pairs and compute token-level answer spans.

    Designed for ``Dataset.map(..., batched=True)``: ``examples`` is a dict
    of column-name -> list. Character-level answer offsets are mapped to
    token indices via the tokenizer's offset mapping.

    FIXES over the original:
      * ``offset_mapping`` is popped before returning — it is not a model
        input, and ``Trainer`` would otherwise pass it to ``forward()`` and
        crash during training.
      * No ``return_tensors='pt'`` / ``torch.tensor`` — ``Dataset.map``
        stores plain lists (Arrow) anyway, so tensor wrapping was wasted.

    Args:
        examples: batch dict with ``question``, ``context`` and ``answers``
            (each answer a ``{"text", "start"}`` dict) columns.

    Returns:
        dict of tokenized inputs plus ``start_positions``/``end_positions``
        lists of token indices.
    """
    encodings = tokenizer(
        examples['question'],
        examples['context'],
        truncation=True,
        padding='max_length',
        max_length=512,
        return_offsets_mapping=True,
    )

    start_positions = []
    end_positions = []
    for i, answer in enumerate(examples['answers']):
        answer_start = answer['start']
        answer_end = answer_start + len(answer['text'])

        # Walk the char offsets to find the tokens bracketing the answer.
        # Special tokens carry (0, 0) offsets and never match a non-empty span.
        start_token, end_token = None, None
        for idx, (start, end) in enumerate(encodings['offset_mapping'][i]):
            if start <= answer_start < end:
                start_token = idx
            if start < answer_end <= end:
                end_token = idx
            if start_token is not None and end_token is not None:
                break

        # Fall back to index 0 when the answer was truncated away.
        start_positions.append(start_token if start_token is not None else 0)
        end_positions.append(end_token if end_token is not None else 0)

    # offset_mapping is not accepted by the model's forward(); drop it so
    # Trainer does not feed it to the model.
    encodings.pop('offset_mapping')
    encodings['start_positions'] = start_positions
    encodings['end_positions'] = end_positions
    return encodings

# Tokenize the whole dataset up front.
dataset = dataset.map(preprocess_function, batched=True)

# Training configuration.
# NOTE(review): eval_steps=1 / save_steps=1 evaluates and writes a full
# checkpoint after every optimizer step — presumably debug settings; confirm
# before a real run.
train_config = {
    'output_dir': './results',
    'eval_strategy': "steps",       # evaluate every eval_steps steps
    'eval_steps': 1,
    'save_strategy': "steps",       # checkpoint every save_steps steps
    'save_steps': 1,
    'learning_rate': 2e-5,
    'per_device_train_batch_size': 2,
    'per_device_eval_batch_size': 4,
    'num_train_epochs': 3,
    'weight_decay': 0.01,
    'logging_dir': './logs',
    'logging_steps': 1,
    'load_best_model_at_end': True,  # restore best checkpoint after training
}
training_args = TrainingArguments(**train_config)

# Build the Trainer.
# NOTE(review): eval_dataset is the training set itself — there is no
# held-out split, so the evaluation metrics only measure memorization.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    eval_dataset=dataset,
    tokenizer=tokenizer,
)

# Fine-tune the QA head.
trainer.train()

# Run a final evaluation pass and report the metrics.
eval_metrics = trainer.evaluate()
print(eval_metrics)

# Persist the fine-tuned weights and tokenizer side by side so they can be
# reloaded with from_pretrained() later.
save_dir = './model'
model.save_pretrained(save_dir)
tokenizer.save_pretrained(save_dir)