import json
import torch
import os
import swanlab
from datasets import Dataset
import pandas as pd
from trl import SFTTrainer
from modelscope import snapshot_download
from swanlab.integration.transformers import SwanLabCallback
from peft import LoraConfig, TaskType, get_peft_model
from transformers import AutoModelForCausalLM, AutoTokenizer,TrainingArguments, Trainer, DataCollatorForSeq2Seq

jsonl_path = "train.jsonl"
# Results must be saved under this directory.
# NOTE(review): `c2net_context` is not defined anywhere in this file —
# presumably injected by the OpenI/c2net platform runtime (e.g. via
# `from c2net.context import prepare`); confirm it is initialized before
# this point.
output_path = c2net_context.output_path
modeloutput_path = os.path.join(output_path, "Qwen2.5-3B-Instruct")
# exist_ok=True avoids the check-then-create race of the original
# `if not os.path.exists(...)` guard.
os.makedirs(modeloutput_path, exist_ok=True)

# Alpaca-style prompt template: instruction / input / response slots
# filled positionally by str.format in process_func.
alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}
### Input:
{}
### Response:
{}"""

def process_func(example):
    """Build an Alpaca-style prompt from one sample and tokenize it.

    Args:
        example: dict with a "system" instruction string and a
            single-turn "conversation" list whose first item holds
            'human' and 'assistant' strings.

    Returns:
        dict with fixed-length (512) ``input_ids``, the matching
        ``attention_mask``, and ``labels`` in which padding positions
        are set to -100 so the loss ignores them.
    """
    instruction = example["system"]
    input_text = example["conversation"][0]['human']
    output = example["conversation"][0]['assistant']
    text = alpaca_prompt.format(instruction, input_text, output)
    tokenized = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        padding='max_length',
        max_length=512,
    )
    input_ids = tokenized.input_ids[0]
    attention_mask = tokenized.attention_mask[0]
    # Fix: the original cloned input_ids verbatim into labels, so the
    # model was also trained to predict padding tokens. -100 is the
    # ignore_index used by HF causal-LM cross-entropy.
    labels = input_ids.clone()
    labels[attention_mask == 0] = -100
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels,
    }

def predict(messages, model, tokenizer):
    """Generate, print and return a chat completion for `messages`.

    Args:
        messages: chat messages in the HF chat-template format
            (list of {"role": ..., "content": ...} dicts).
        model: a causal LM with a `.generate` method.
        tokenizer: its paired tokenizer (must support chat templates).

    Returns:
        The decoded response text (special tokens stripped).
    """
    # Use the model's own device instead of hard-coding "cuda" so the
    # helper also works on CPU or other accelerators.
    device = model.device
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(device)

    generated_ids = model.generate(
        model_inputs.input_ids,
        # Pass the mask explicitly — correct for padded inputs and
        # silences the HF "attention mask not set" warning.
        attention_mask=model_inputs.attention_mask,
        max_new_tokens=512,
    )
    # Drop the prompt tokens; keep only the newly generated suffix.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]

    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

    print(response)

    return response
    

# Fix: load the tokenizer BEFORE mapping the dataset — process_func
# reads the module-level `tokenizer`, so the original order (map first,
# tokenizer defined afterwards) raised NameError at runtime.

# Download the Qwen1.5-7B model from ModelScope into the local directory.
model_dir = snapshot_download("qwen/Qwen1.5-7B-Chat", cache_dir="./", revision="master")

# Load tokenizer and model weights with Transformers.
tokenizer = AutoTokenizer.from_pretrained(model_dir, use_fast=False, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", torch_dtype=torch.bfloat16)
model.enable_input_require_grads()  # required when gradient checkpointing is enabled

# Read the JSONL training data and tokenize every sample.
train_df = pd.read_json(jsonl_path, lines=True)
ds = Dataset.from_pandas(train_df)
tokenized_id = ds.map(process_func, num_proc=4)


# LoRA adapter configuration for causal-LM fine-tuning: adapt every
# attention and MLP projection of the transformer blocks.
lora_target_modules = [
    "q_proj", "k_proj", "v_proj", "o_proj",
    "gate_proj", "up_proj", "down_proj",
]
config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    target_modules=lora_target_modules,
    inference_mode=False,  # training mode
    r=8,                   # LoRA rank
    lora_alpha=32,         # LoRA scaling factor (see the LoRA paper)
    lora_dropout=0.1,      # dropout applied to the adapter layers
)

# Wrap the base model with the LoRA adapters.
model = get_peft_model(model, config)

# Stream training metrics to SwanLab.
swanlab_callback = SwanLabCallback(project="Qwen-fintune", experiment_name="Qwen1.5-7B-Chat")

# SFT Trainer 配置
# SFT Trainer configuration.
trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=tokenized_id,
    # NOTE(review): the dataset is already tokenized by process_func
    # (input_ids/labels) and carries no "text" column — confirm the
    # installed trl version skips text processing when input_ids exist.
    dataset_text_field="text",
    max_seq_length=512,
    dataset_num_proc=2,
    packing=False,  # packing can speed up training on short sequences
    args=TrainingArguments(
        per_device_train_batch_size=1,
        gradient_accumulation_steps=4,
        warmup_steps=5,
        learning_rate=2e-4,
        # Prefer bf16 on hardware that supports it, else fall back to fp16.
        fp16=not torch.cuda.is_bf16_supported(),
        bf16=torch.cuda.is_bf16_supported(),
        logging_steps=1,
        optim="adamw_8bit",
        weight_decay=0.01,
        lr_scheduler_type="linear",
        seed=3407,
        # Fix: the original referenced `args.output_dir` and
        # `resume_from_checkpoint`, neither of which is defined in this
        # script (NameError). Write checkpoints next to the final
        # adapter instead; a fresh run needs no resume argument.
        output_dir=modeloutput_path,
    ),
    callbacks=[swanlab_callback],
)

trainer.train()
# Save the LoRA adapter weights.
model.save_pretrained(modeloutput_path)







from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer, GenerationConfig
from peft import PeftModel

# Reload the base model and merge in the trained LoRA adapter.
model = AutoModelForCausalLM.from_pretrained(model_dir)
# Fix: the adapter was saved to `modeloutput_path` above — the original
# loaded a hard-coded relative "Qwen2.5-3B-Instruct/checkpoint-1000"
# path that does not match where training wrote it.
model = PeftModel.from_pretrained(model, modeloutput_path)
model = model.merge_and_unload()

output_dir = "Qwen2.5-3B-Instruct-test"
# exist_ok=True replaces the check-then-create race of the original.
os.makedirs(output_dir, exist_ok=True)

# Persist the merged model plus tokenizer for standalone use.
tokenizer.save_pretrained(output_dir)
model.save_pretrained(output_dir, safe_serialization=False, max_shard_size='10GB')

device = model.device
messages = [
    {"role": "user", "content": "雪雪贵姓？"}
]
inputs = tokenizer.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
).to(device)
# Generate a reply. Fix: do_sample=True is required for temperature and
# min_p to take effect — under the default greedy decoding they are
# ignored (HF only warns).
outputs = model.generate(
    input_ids=inputs,
    max_new_tokens=128,
    use_cache=True,
    do_sample=True,
    temperature=1.5,
    min_p=0.1,
)
print(tokenizer.batch_decode(outputs))
# # 用测试集的前10条，测试模型
# test_df = pd.read_json(jsonl_path, lines=True)[:10]

# test_text_list = []
# for index, row in test_df.iterrows():
#     instruction = row['instruction']
#     input_value = row['input']
    
#     messages = [
#         {"role": "system", "content": f"{instruction}"},
#         {"role": "user", "content": f"{input_value}"}
#     ]

#     response = predict(messages, model, tokenizer)
#     messages.append({"role": "assistant", "content": f"{response}"})
#     result_text = f"{messages[0]}\n\n{messages[1]}\n\n{messages[2]}"
#     test_text_list.append(swanlab.Text(result_text, caption=response))
    
# swanlab.log({"Prediction": test_text_list})
# #回传结果到openi，只有训练任务才能回传
# upload_output()
# Close the SwanLab run so all logged metrics are flushed and uploaded.
swanlab.finish()