from modelscope import AutoModelForCausalLM, AutoTokenizer
import json
import os
from datetime import datetime
import nltk
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from bert_score import score  # 导入bert_score库

# Register a local nltk_data directory (if present) so nltk can use
# pre-downloaded corpora/tokenizers instead of fetching them at runtime.
nltk_data_path = "/mnt/ssd/jsj/patient/nltk_data"
if not os.path.exists(nltk_data_path):
    print(f"警告: 未找到指定的nltk_data路径: {nltk_data_path}")
    # nltk falls back to its default search paths when this dir is missing
else:
    nltk.data.path.append(nltk_data_path)
    print(f"已加载本地nltk_data路径: {nltk_data_path}")

# Path to a locally downloaded copy of the model (avoids re-downloading).
# To use the hub-hosted model instead, pass the model id directly:
local_model_path = "/mnt/ssd/jsj/models/models/Qwen/Qwen2.5-7B-Instruct"  # example local path
# Online alternative:
# model_name = "Qwen/Qwen2.5-7B-Instruct"

# GPU selection:
# Option 1: pass device_map with an explicit index, e.g. device_map="cuda:0",
#           or device_map="auto" to shard automatically across visible GPUs.

# Option 2: call .to(device) after from_pretrained (see commented code below).
model = AutoModelForCausalLM.from_pretrained(
    local_model_path,  # load weights from the local path above
    torch_dtype="auto",  # let the checkpoint config pick the dtype (bf16/fp16)
    device_map="cuda:0"  # place the whole model on GPU 0
)
# More flexible device selection (CPU fallback):
# import torch
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# model = model.to(device)

tokenizer = AutoTokenizer.from_pretrained(local_model_path)  # must match the model path

# Build a chat-formatted prompt and generate a reply.
prompt = "你是谁"
messages = [
    {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
    {"role": "user", "content": prompt}
]
# Render the conversation with the model's chat template; the generation
# prompt marker tells the model where to start its answer.
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(**model_inputs, max_new_tokens=512)
# generate() echoes the prompt tokens; keep only the newly produced ones.
trimmed_ids = []
for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids):
    trimmed_ids.append(output_ids[len(input_ids):])
generated_ids = trimmed_ids

# Decode the generated token ids back into text.
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)

# Directory where all evaluation artifacts are written (created if absent).
save_dir = "/mnt/ssd/jsj/patient/results/new"  # results output directory
os.makedirs(save_dir, exist_ok=True)

# Load the reference answer (ground truth) used by the BLEU / BERTScore
# blocks below. Expected file format: {"ground_truth": "<reference text>"}.
ground_truth_path = os.path.join(save_dir, "groundtruth.json")
ground_truth = ""
if os.path.exists(ground_truth_path):
    try:
        with open(ground_truth_path, "r", encoding="utf-8") as f:
            ground_truth_data = json.load(f)
        ground_truth = ground_truth_data.get("ground_truth", "")
        print(f"已加载ground truth: {ground_truth}")
    except (json.JSONDecodeError, OSError) as e:
        # A corrupt or unreadable file must not abort the whole run; the
        # metric blocks below already skip cleanly when ground_truth is "".
        print(f"读取ground truth文件失败: {ground_truth_path} ({e})")
else:
    print(f"未找到ground truth文件: {ground_truth_path}")

# Character-level BLEU between the model response and the reference.
# Chinese text is split per character, the common practice for Chinese BLEU.
smoother = SmoothingFunction().method1
bleu_scores = {}
if ground_truth:
    reference = list(ground_truth)
    candidate = list(response)

    # N-gram weights for BLEU-1..4. Each tuple sums to exactly 1: the
    # original BLEU-3 weights (0.33, 0.33, 0.33, 0) summed to 0.99,
    # slightly skewing the weighted geometric mean of n-gram precisions.
    bleu_weights = {
        "bleu_1": (1, 0, 0, 0),
        "bleu_2": (0.5, 0.5, 0, 0),
        "bleu_3": (1 / 3, 1 / 3, 1 / 3, 0),
        "bleu_4": (0.25, 0.25, 0.25, 0.25),
    }
    for metric_name, weights in bleu_weights.items():
        bleu_scores[metric_name] = sentence_bleu(
            [reference], candidate, weights=weights, smoothing_function=smoother
        )

    print(f"\nBLEU分数:")
    for key, value in bleu_scores.items():
        print(f"{key}: {value:.4f}")

# Semantic similarity via BERTScore, scored with bert-base-chinese.
bert_scores = {}
if ground_truth:
    try:
        precision, recall, f1 = score(
            [response], [ground_truth], lang="zh", model_type="bert-base-chinese"
        )
        # score() returns per-sentence tensors; reduce to scalar floats.
        bert_scores["bert_score_precision"] = float(precision.mean())
        bert_scores["bert_score_recall"] = float(recall.mean())
        bert_scores["bert_score_f1"] = float(f1.mean())

        print(f"\nBERTScore:")
        for key, value in bert_scores.items():
            print(f"{key}: {value:.4f}")
    except Exception as e:
        # Best-effort: a BERTScore failure (e.g. missing model weights)
        # should not abort the run; the metric is just omitted from results.
        print(f"计算BERTScore时出错: {str(e)}")

# Assemble the record to persist: run metadata plus any computed metrics.
result_data = {
    "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    "prompt": prompt,
    "response": response,
    "model_path": local_model_path,
    "system_prompt": messages[0]["content"],
    **bleu_scores,   # empty dict when no ground truth was available
    **bert_scores,   # empty dict when BERTScore was skipped or failed
}

# Timestamped filename so repeated runs never overwrite each other.
file_name = f"query_result_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
file_path = os.path.join(save_dir, file_name)

with open(file_path, "w", encoding="utf-8") as f:
    json.dump(result_data, f, ensure_ascii=False, indent=2)

print(f"\n结果已保存到: {file_path}")
