Update app.py
app.py CHANGED
@@ -1,10 +1,10 @@
+
 import streamlit as st
-from transformers import
+from transformers import pipeline
 
-# Load
-model_name = "mostafaHaydar/model_test"
-
-model = AutoModelForCausalLM.from_pretrained(model_name)
+# Load the pipeline for text generation
+model_name = "mostafaHaydar/model_test"  # Replace with your model's name
+generator = pipeline("text-generation", model=model_name)
 
 # Set up the Streamlit app
 st.title("Text Generation with LLaMA 3")
@@ -15,10 +15,8 @@ prompt = st.text_area("Enter your prompt:")
 # Generate text when the user clicks the button
 if st.button("Generate"):
     if prompt:
-        #
-
-        output = model.generate(**inputs, max_length=150)  # Adjust max_length as needed
-        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+        # Generate text using the pipeline
+        generated_text = generator(prompt, max_length=150, num_return_sequences=1)[0]['generated_text']
 
         # Display the generated text
         st.subheader("Generated Text:")
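
For context, a minimal sketch of what app.py could look like after this commit. The lines between the two hunks and after the "Generated Text:" subheader are not shown in the diff; the prompt input is taken from the second hunk's context line, and the final st.write call is an assumption about how the generated text is displayed.

import streamlit as st
from transformers import pipeline

# Load the pipeline for text generation
model_name = "mostafaHaydar/model_test"  # Replace with your model's name
generator = pipeline("text-generation", model=model_name)

# Set up the Streamlit app
st.title("Text Generation with LLaMA 3")

# Prompt input (from the second hunk's context line)
prompt = st.text_area("Enter your prompt:")

# Generate text when the user clicks the button
if st.button("Generate"):
    if prompt:
        # Generate text using the pipeline
        generated_text = generator(prompt, max_length=150, num_return_sequences=1)[0]['generated_text']

        # Display the generated text
        st.subheader("Generated Text:")
        st.write(generated_text)  # assumed: the diff cuts off after the subheader

Switching to pipeline("text-generation", ...) bundles tokenization, generation, and decoding into a single call, so the separate tokenizer and AutoModelForCausalLM objects and the manual model.generate / tokenizer.decode steps from the previous version are no longer needed.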