from transformers import pipeline | |
from huggingface_hub import login | |
import os | |
# 设置 Hugging Face token | |
HF_TOKEN = os.getenv('HF_TOKEN') # 从环境变量获取 token | |
if HF_TOKEN: | |
login(token=HF_TOKEN) # 使用 token 登录 | |
else: | |
print("请设置环境变量 HF_TOKEN 为你的 Hugging Face token") | |
print("Windows 设置方法: $env:HF_TOKEN = '你的token'") | |
print("或者直接在代码中设置: login(token='你的token')") | |
exit(1) | |
safe_pipe = pipeline( | |
"text-generation", | |
model="meta-llama/Meta-Llama-3-8B", | |
torch_dtype="auto", | |
device_map="auto" | |
) | |
naive_pipe = pipeline( | |
"text-generation", | |
model="microsoft/DialoGPT-medium", | |
torch_dtype="auto", | |
device_map="auto" | |
) | |
safe_out = safe_pipe(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"] | |
naive_out = naive_pipe(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"] | |
print("=== 安全对齐模型回答 ===") | |
print(safe_out) | |
print("\n=== 无对齐模型回答 ===") | |
print(naive_out) |