import json
import torch
from tqdm import tqdm
from datasets import Dataset
from transformers import TrainingArguments, Trainer,DataCollatorForSeq2Seq
from peft import LoraConfig,get_peft_model
from modelscope import AutoModelForCausalLM,AutoTokenizer

# ---------------------------------------------------------------------------
# Base model & tokenizer setup, then wrap the model with LoRA adapters.
# ---------------------------------------------------------------------------
model_name = r'Qwen/Qwen3-1.7B'
output_dir = './qwen_lora_finetunned'

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,  # bf16 weights to halve memory vs fp32
    device_map='auto',
    use_cache=False,             # KV-cache off: incompatible with grad ckpt/training
)
# Qwen tokenizers ship without a pad token; reuse EOS so batching can pad.
tokenizer.pad_token = tokenizer.eos_token

# LoRA configuration: low-rank adapters on all attention projections, with the
# embedding table and LM head kept fully trainable alongside the adapters.
lora_kwargs = dict(
    r=8,
    lora_alpha=32,
    target_modules=['q_proj', 'v_proj', 'k_proj', 'o_proj'],
    lora_dropout=0.05,
    bias='none',
    task_type='CAUSAL_LM',
    modules_to_save=['embed_tokens', 'lm_head'],
)
peft_config = LoraConfig(**lora_kwargs)

model = get_peft_model(model, peft_config)
model.print_trainable_parameters()

# Data-processing helper
def format_conversation(example):
    """Standard-format conversation processing with data validation.

    Normalizes the ``example["conversation"]`` message list into
    user/assistant roles and renders it through the tokenizer's chat
    template. Returns None when the template cannot be applied.

    NOTE(review): this definition appears to continue past the end of this
    chunk — the success path after the try/except has no visible return, so
    a ``return`` of ``text`` presumably follows below.
    """
    messages = []
    for msg in example["conversation"]:
        # NOTE(review): the fallback here looks up the *key* "assistant",
        # which is a role value, not a field name — likely intended
        # something like msg.get("from", "unknown") or a plain "unknown"
        # default. Confirm against the dataset schema.
        role = msg.get("role",msg.get("assistant","unknown")).lower()
        content = msg.get("content","")
        # Validate role: coerce anything other than user/assistant into an
        # alternating sequence (first message defaults to "user").
        if role not in ['user','assistant']:
            if len(messages) == 0:
                role = "user"
            else:
                role = "assistant" if messages[-1]["role"] == "user" else "user"

        # Skip invalid (empty-content) messages.
        # NOTE(review): the bare ``content`` expression below is a no-op —
        # almost certainly ``continue`` was intended; as written, empty
        # messages are still appended.
        if len(content.strip()) < 1:
            content
        messages.append({"role":role,"content":content})

    # Build the conversation template text.
    try:
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=False
        )
    except Exception as e:
        # Best-effort: signal an unrenderable conversation with None so the
        # caller can drop this example.
        return None