import pandas as pd
import torch
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForSeq2Seq, TrainingArguments, Trainer
from modelscope import snapshot_download
from peft import LoraConfig, TaskType, get_peft_model, PeftModel
import json

# Base model to fine-tune with LoRA.
model_id = 'DeepSeek-R1-Distill-Llama-8B'

# Prefer CUDA, then Apple MPS, then fall back to CPU.  The original code
# hard-coded CPU despite its comment promising this fallback chain, which
# left the CUDA-specific branch in train() unreachable.
if torch.cuda.is_available():
    device = torch.device("cuda")
elif torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")

# Filesystem layout (container paths).
base_dir = '/app'
models_dir = f'{base_dir}/models'
# Dataset: https://hf-mirror.com/datasets/EmilMarian/BOLA-Karate-DSL-Dataset
dataset_file = f'{base_dir}/datasets/bola-raw-dataset_v5.jsonl'
# presumably mirrors ModelScope's on-disk naming ('.' stored as '___') — TODO confirm
model_path = f"{models_dir}/{model_id.replace('.', '___')}"
checkpoint_dir = f"./checkpoint/{model_id}"
lora_dir = f"./lora/{model_id}"

# fp16 weights to halve memory use.
torch_dtype = torch.half

# LoRA adapter: rank-8 adapters on every attention and MLP projection of the
# Llama architecture, trainable (inference_mode=False).
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    inference_mode=False,
    r=8,
    lora_alpha=32,
    lora_dropout=0.1
)


def train():
    """Fine-tune the base causal LM with LoRA on the Karate-DSL JSONL dataset.

    Loads the tokenizer and model from ``model_path``, tokenizes every record
    in ``dataset_file``, trains with the hyper-parameters below, runs one
    sample generation, then saves the LoRA adapter (``lora_dir``) and the
    trainer model (``./fine_tuned_kr_model``).
    """
    # Optional model download (disabled: the model is expected on disk).
    #model_dir = snapshot_download(model_id=model_id, cache_dir=f"{models_dir}", revision='master')
    #if model_path != model_dir:
    #    raise Exception(f"model_path:{model_path} != model_dir:{model_dir}")

    # Load tokenizer; reuse EOS as PAD since Llama-style models ship without one.
    tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token

    # Load the model directly onto the target device.
    #model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch_dtype)
    model = AutoModelForCausalLM.from_pretrained(model_path, device_map={"":device}, torch_dtype=torch_dtype)

    # Move the model to the target device.
    if device.type == "cuda":
        model.to_empty(device=device)  # meta tensors must be moved with to_empty()
    else:
        model.to(device)  # non-CUDA devices can use a plain to()

    model.enable_input_require_grads()  # required when gradient checkpointing is on

    # Load the JSONL dataset: one JSON object per line.
    with open(dataset_file, 'r', encoding='utf-8') as f:
        data = [json.loads(line) for line in f]
    ds = Dataset.from_list(data)
    print(ds[:3])

    def process_func(item):
        """Tokenize one record into input_ids / attention_mask / labels.

        ``item`` is an already-parsed dict with a "text" field of the form
        "[question]: ... [response]: ...".  Prompt tokens are masked from the
        loss with -100 so only the response tokens are learned.
        """
        MAX_LENGTH = 384
        # BUG FIX: the original called json.loads(line), where `line` was the
        # closure variable left over from the file-reading loop above — every
        # example was therefore tokenized from the *last* dataset line.  Use
        # the mapped record instead.
        text = item["text"]

        # Extract the question between the markers.
        question_start = text.find("[question]: ") + len("[question]: ")
        question_end = text.find(" [response]: ")
        input_text = text[question_start:question_end]

        # Extract the response.
        response = text[question_end + len(" [response]: "):]

        # NOTE(review): the template emits <|eot_id|> right after the assistant
        # header, before the response — confirm this matches the base model's
        # chat template.
        input_part = tokenizer(
            f"<|start_header_id|>user<|end_header_id|>\n\n{input_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n<|eot_id|>",
            add_special_tokens=False
        )
        response_part = tokenizer(f"{response}<|eot_id|>", add_special_tokens=False)

        input_ids = input_part["input_ids"] + response_part["input_ids"] + [tokenizer.pad_token_id]
        attention_mask = input_part["attention_mask"] + response_part["attention_mask"] + [1]
        # -100 masks the prompt so the loss only covers the response tokens.
        labels = [-100] * len(input_part["input_ids"]) + response_part["input_ids"] + [tokenizer.pad_token_id]

        # Truncate to the maximum sequence length.
        if len(input_ids) > MAX_LENGTH:
            input_ids = input_ids[:MAX_LENGTH]
            attention_mask = attention_mask[:MAX_LENGTH]
            labels = labels[:MAX_LENGTH]

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "labels": labels
        }

    tokenized_id = ds.map(process_func, remove_columns=ds.column_names)

    # Sanity check: decode the non-masked label tokens of one sample (the
    # original computed this and discarded the result; print it instead).
    print(tokenizer.decode(list(filter(lambda x: x != -100, tokenized_id[1]["labels"]))))

    # Wrap the base model with the LoRA adapter.
    model = get_peft_model(model, lora_config)

    # Training configuration: effective batch size 16 (4 x 4 accumulation).
    training_args = TrainingArguments(
        output_dir=checkpoint_dir,
        per_device_train_batch_size=4,
        gradient_accumulation_steps=4,
        logging_steps=10,
        num_train_epochs=3,
        save_steps=100,
        learning_rate=1e-4,
        save_on_each_node=True,
        gradient_checkpointing=True,
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_id,
        data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),
    )
    train_result = trainer.train()
    # Report the final training loss.
    train_loss = train_result.training_loss
    print(f"Training Loss: {train_loss}")

    # Quick qualitative check with a Karate-DSL generation prompt.
    text = "Write a Karate DSL scenario for API security testing, considering vulnerabilities like SQL injection, Broken Authentication, CSRF, and others. Depending on the required fields (body params, query params, assertion types and so on), the scenario should be structured as follows: Feature: [Feature Name] Background: * [Background details such as URL path/headers] Scenario: [ScenarioID]-[Scenario Name] Given url/path And request body (if any) And headers When method POST/PUT/GET/DELETE Then status And match body (if any). Include only the required Karate DSL code for the java feature scenario code. The scenario should include the endpoint path, request headers, and a JSON body (if required) with all required fields as per the schema. Validate the response to ensure it has the required status code and the response body matches the expected schema. Include dynamic data handling for fields where applicable and, ensure that common setup steps are in the Background section. Handle potential error responses gracefully. Use the following OpenAPI specification for the scenario generation."
    inputs = tokenizer(f"User: {text}\n\n", return_tensors="pt")
    outputs = model.generate(**inputs.to(model.device), max_new_tokens=3000)

    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print(result)

    # Save the LoRA adapter and tokenizer.
    trainer.model.save_pretrained(lora_dir)
    tokenizer.save_pretrained(lora_dir)

    # Save the trainer model separately.
    trainer.save_model("./fine_tuned_kr_model")

# Standalone inference helper: takes a single prompt string and returns the generated text.
def generate_output(input_text):
    """Generate a completion for ``input_text`` with the fine-tuned LoRA model.

    Loads the base model from ``model_path``, stacks the saved LoRA adapter
    from ``lora_dir`` on top, formats the prompt with the Llama-3 chat
    template, samples up to 200 new tokens, and returns the decoded output
    (prompt included, special tokens stripped).
    """
    # Tokenizer: reuse EOS as PAD since Llama-style models ship without one.
    tok = AutoTokenizer.from_pretrained(model_path, use_fast=False, trust_remote_code=True)
    tok.pad_token = tok.eos_token

    # Base weights first, then the LoRA adapter, in eval mode for inference.
    backbone = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch_dtype)
    backbone.to(device)
    peft_model = PeftModel.from_pretrained(backbone, lora_dir)
    peft_model.eval()

    # Llama-3 chat template: user turn, then an open assistant turn.
    prompt = f"<|start_header_id|>user<|end_header_id|>\n\n{input_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
    prompt_ids = tok(prompt, return_tensors="pt").input_ids.to(device)

    # Sampled generation; no gradients needed at inference time.
    with torch.no_grad():
        generated = peft_model.generate(
            input_ids=prompt_ids,
            max_new_tokens=200,
            do_sample=True,
            top_p=0.9,
            temperature=0.8,
        )

    return tok.decode(generated[0], skip_special_tokens=True)



if __name__ == '__main__':
    # Run fine-tuning; the inference example below is left disabled.
    train()
    #res = generate_output("一名27岁的孕妇怀孕3个月后骑车摔倒，出现阴道少量出血、腰腹坠痛、精神倦怠、脉滑无力等症状，此时治疗的最佳方剂是什么？")
    #print(res)
