# Hugging Face Spaces: running on a T4 GPU
"""Run the Qwen-7B chat model.

Requires transformers 4.31.0. The model is sharded across available
devices via device_map="auto" (fits on a single T4 with its defaults).
"""
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

# trust_remote_code=True is required: Qwen ships its own modeling code with
# the checkpoint. NOTE(review): this executes Python fetched from the Hub —
# only use with repositories you trust.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True
).eval()

# Load the checkpoint's default generation config; generation length, top_p,
# and other sampling hyperparameters can be overridden on this object.
model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)

# history=None starts a fresh conversation (history=[] is equivalent).
response, history = model.chat(tokenizer, "你好", history=None)
print(response)