# docker-api/generator/llm_inference_all.py
from transformers import pipeline
# import spaces
# 1. λͺ¨λΈ λ‘œλ“œ (졜초 1번만 λ‘œλ“œλ¨)
generator = pipeline(
"text-generation",
model="dasomaru/gemma-3-4bit-it-demo", # λ„€κ°€ μ—…λ‘œλ“œν•œ λͺ¨λΈ 이름
tokenizer="dasomaru/gemma-3-4bit-it-demo",
device=0, # CUDA:0 μ‚¬μš© (GPU). CPU만 있으면 device=-1
max_new_tokens=512,
temperature=0.7,
top_p=0.9,
repetition_penalty=1.1
)
# 2. Answer-generation function
# @spaces.GPU(duration=300)
def generate_answer(prompt: str) -> str:
"""
μž…λ ₯받은 ν”„λ‘¬ν”„νŠΈλ‘œλΆ€ν„° λͺ¨λΈμ΄ 닡변을 μƒμ„±ν•œλ‹€.
"""
print(f"πŸ”΅ Prompt Length: {len(prompt)} characters") # μΆ”κ°€!
outputs = generator(
prompt,
do_sample=True,
top_k=50,
num_return_sequences=1
)
return outputs[0]["generated_text"].strip()
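

# --- Usage sketch (not part of the original file): a minimal smoke test,
# --- assuming the checkpoint above is downloadable and CUDA device 0 exists.
# --- The sample question is a hypothetical placeholder.
if __name__ == "__main__":
    question = "Explain, in one sentence, what a text-generation pipeline does."
    full_text = generate_answer(question)
    # Strip the echoed prompt so only the newly generated tokens are printed.
    answer = full_text[len(question):] if full_text.startswith(question) else full_text
    print(answer.strip())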