Create handler.py
handler.py
ADDED
@@ -0,0 +1,31 @@
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch

# Load the model and tokenizer once at startup
def init():
    global model, tokenizer, generator
    model_name = "niruemon/llm-swp"  # model name on the Hugging Face Hub

    # Load the model and tokenizer
    model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
    tokenizer = AutoTokenizer.from_pretrained(model_name)

    # Build the text-generation pipeline; the model was already placed on
    # devices by device_map="auto" above, so it is not passed again here
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Process a request from the user
def inference(inputs):
    global generator

    # Read the prompt text from the inputs
    prompt = inputs.get("text", "")
    if not prompt:
        return {"error": "No input text provided."}

    # Generate text with the model
    try:
        result = generator(prompt, max_length=150, num_return_sequences=1)
        generated_text = result[0]["generated_text"]
        return {"generated_text": generated_text}
    except Exception as e:
        return {"error": str(e)}
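To sanity-check the handler locally, call init() once and then pass inference() a dict shaped like the payloads it expects. A minimal sketch, assuming the file is importable as handler, the niruemon/llm-swp weights can be downloaded, and the prompt string is just an illustration:

# Local smoke test for handler.py (illustrative)
import handler

handler.init()  # downloads/loads the model once; needs a GPU or ample RAM

# The handler expects a dict with a "text" key
print(handler.inference({"text": "Hello, how are you?"}))
# -> {"generated_text": "..."} on success

# Missing input is reported as a dict, not raised as an exception
print(handler.inference({}))
# -> {"error": "No input text provided."}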