llm-comparison-demo / model_a.py
HeChuan1's picture
Create model_a.py (#9)
91abdf6 verified
raw
history blame contribute delete
487 Bytes
# model_wrappers/model_a.py
from transformers import pipeline
# Chinese GPT-2: IDEA-CCNL "Wenzhong" 110M-parameter model.
# NOTE: this runs at import time — it downloads (on first use) and loads the
# model into memory, so importing this module is slow and requires network/cache.
model_a = pipeline("text-generation", model="IDEA-CCNL/Wenzhong-GPT2-110M", tokenizer="IDEA-CCNL/Wenzhong-GPT2-110M")
def run_model_a(prompt: str) -> str:
    """Generate a text continuation of *prompt* with the Wenzhong GPT-2 pipeline.

    Sampling is enabled (temperature/top-k/top-p), so repeated calls with the
    same prompt produce different outputs.

    Args:
        prompt: Input text to continue.

    Returns:
        The full generated text (the pipeline's output includes the prompt
        followed by the sampled continuation).
    """
    # Use max_new_tokens instead of max_length: max_length counts the prompt
    # tokens as well, so prompts near 100 tokens would get little or no new
    # text. max_new_tokens bounds only the generated continuation.
    output = model_a(prompt,
                     max_new_tokens=100,
                     do_sample=True,
                     temperature=0.8,
                     top_k=50,
                     top_p=0.95)
    # Pipeline returns a list of dicts, one per generated sequence.
    return output[0]["generated_text"]