Update main.py
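Adds progress logging around the slow startup steps (model load, pipeline creation, interface setup) and replaces the launch call with one that passes an explicit host and port.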
main.py CHANGED
@@ -7,8 +7,16 @@ import gradio as gr
 tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
 model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b")
 
+print("***")
+print("Loaded tokenizer and model")
+print("***")
+
 pipe_flan = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
+print("***")
+print("Created pipeline")
+print("***")
+
 # Text generation
 def generator(input):
     output = pipe_flan(input, max_length=50, num_return_sequences=1)
@@ -21,5 +29,10 @@ demo = gr.Interface(
     outputs=gr.outputs.Textbox(label="Generated Text")
 )
 
+host, port = "0.0.0.0", 7860
+print("***")
+print(f"Set up interface. Hosting now on {host}:{port}")
+print("***")
+
 # Launching the Gradio Interface
-demo.launch(server_name=
+demo.launch(server_name=host, server_port=port)
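For context, here is a minimal sketch of what the complete main.py plausibly looks like after this commit. Only the lines visible in the two hunks above are known: the transformers import, the generator's return statement, and the fn/inputs arguments of gr.Interface are assumptions, and the component style assumes the legacy Gradio 3.x API that the visible outputs= line already uses (Gradio 4+ would use gr.Textbox for both). The pre-commit launch line appears truncated in the diff and is left as shown.

# Hypothetical reconstruction of main.py after this commit; lines not
# shown in the diff are assumptions and are marked as such.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline  # assumed import

tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b")

# The "***" banners bracket each slow startup step so progress shows up
# clearly in the Space's runtime logs.
print("***")
print("Loaded tokenizer and model")
print("***")

pipe_flan = pipeline("text-generation", model=model, tokenizer=tokenizer)

print("***")
print("Created pipeline")
print("***")

# Text generation
def generator(input):
    output = pipe_flan(input, max_length=50, num_return_sequences=1)
    return output[0]["generated_text"]  # assumed: pipeline returns [{"generated_text": ...}]

demo = gr.Interface(
    fn=generator,                                  # assumed wiring
    inputs=gr.inputs.Textbox(label="Input Text"),  # assumed (legacy Gradio 3.x component)
    outputs=gr.outputs.Textbox(label="Generated Text")
)

host, port = "0.0.0.0", 7860
print("***")
print(f"Set up interface. Hosting now on {host}:{port}")
print("***")

# Launching the Gradio Interface
demo.launch(server_name=host, server_port=port)

Binding to 0.0.0.0 on port 7860 matches what a Hugging Face Space expects: the app must listen on all interfaces (Gradio's default is 127.0.0.1) and Spaces route external traffic to port 7860 by default.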
|