import pandas as pd
import torch
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForSeq2Seq, TrainingArguments, Trainer
from modelscope import snapshot_download
from peft import LoraConfig, TaskType, get_peft_model, PeftModel
import os


# Path to the base model. ModelScope caches replace '.' in model names
# with '___'; this path has no dots, so the replace is a no-op kept for
# consistency with other model paths.
model_path = "/app/models/DeepSeek-R1-Distill-Llama-8B".replace('.', '___')
# Path to the fine-tuned LoRA adapter weights
fine_tuned_model_path = "./fine_tuned_kr_model"

# Load the tokenizer (slow tokenizer, matching the training setup)
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, trust_remote_code=True)
# The model has no dedicated pad token; reuse EOS for padding
tokenizer.pad_token = tokenizer.eos_token

# Load the base model in fp16 to halve the memory footprint
base_model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.half)
# Prefer CUDA if available, then Apple MPS, and fall back to CPU
if torch.cuda.is_available():
    device = torch.device("cuda:0")
elif torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")
base_model.to(device)

# Attach the LoRA adapter weights on top of the frozen base model
fine_tuned_model = PeftModel.from_pretrained(base_model, fine_tuned_model_path)

# The user prompt: ask for a Karate DSL API-security test scenario
text = "Write a Karate DSL scenario for API security testing, considering vulnerabilities like SQL injection, Broken Authentication, CSRF, and others. Depending on the required fields (body params, query params, assertion types and so on), the scenario should be structured as follows: Feature: [Feature Name] Background: * [Background details such as URL path/headers] Scenario: [ScenarioID]-[Scenario Name] Given url/path And request body (if any) And headers When method POST/PUT/GET/DELETE Then status And match body (if any). Include only the required Karate DSL code for the java feature scenario code. The scenario should include the endpoint path, request headers, and a JSON body (if required) with all required fields as per the schema. Validate the response to ensure it has the required status code and the response body matches the expected schema. Include dynamic data handling for fields where applicable and, ensure that common setup steps are in the Background section. Handle potential error responses gracefully. Use the following OpenAPI specification for the scenario generation."

# Build the input text, mimicking the format used during training
input_text = f"{text}\n\n"

# Tokenize the input and move the tensors to the model's device
inputs = tokenizer(input_text, return_tensors="pt").to(device)

# Generate the answer
outputs = fine_tuned_model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_new_tokens=3000,  # cap on the length of the generated answer
    do_sample=True,       # required for temperature to take effect; without it, generation is greedy and temperature is ignored
    temperature=0.7,      # controls randomness of the generated text; tune as needed
    pad_token_id=tokenizer.eos_token_id,  # explicit pad id avoids the "pad_token_id not set" warning
)

# Decode the full sequence (prompt + completion) for reference
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

# Extract only the newly generated tokens by slicing at the prompt's
# token count. Slicing the decoded string by character length is fragile
# because decode() need not reproduce the prompt text verbatim.
prompt_token_count = inputs["input_ids"].shape[1]
answer = tokenizer.decode(outputs[0][prompt_token_count:], skip_special_tokens=True)

print("模型生成的回答：", answer)
