lucidmorto
committed on
Commit
•
6d02da4
1
Parent(s):
0cc4a5a
feat: replace client API with direct model inference
Browse files. Switched from using the external client to directly loading the model and tokenizer with the transformers library. This change improves efficiency by reducing dependency on the client API and allows for more direct control over model inference.
app.py
CHANGED
@@ -1,16 +1,15 @@
|
|
1 |
import gradio as gr
|
2 |
-
from
|
3 |
|
4 |
-
#
|
5 |
-
|
|
|
6 |
|
7 |
def generate_text(input_text):
|
8 |
-
#
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
)
|
13 |
-
return result
|
14 |
|
15 |
iface = gr.Interface(
|
16 |
fn=generate_text,
|
|
|
1 |
import gradio as gr
|
2 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
3 |
|
4 |
# Load the model and tokenizer once at module import time so every
# request served by generate_text reuses the same in-memory instances.
# NOTE(review): from_pretrained downloads weights on first run — assumes
# network access / a warm HF cache is available at startup; confirm.
model = AutoModelForCausalLM.from_pretrained("umutbozdag/autotrain-g39vl-h3lir")
tokenizer = AutoTokenizer.from_pretrained("umutbozdag/autotrain-g39vl-h3lir")
|
7 |
|
8 |
def generate_text(input_text):
    """Generate a text continuation for *input_text*.

    Uses the module-level ``tokenizer``/``model`` pair: the prompt is
    encoded to PyTorch tensors, the model generates up to 100 tokens
    total (prompt included), and the first sequence is decoded back to
    a plain string with special tokens stripped.
    """
    encoded = tokenizer(input_text, return_tensors="pt")
    generated = model.generate(**encoded, max_length=100)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
|
|
|
|
|
13 |
|
14 |
iface = gr.Interface(
|
15 |
fn=generate_text,
|