mostafaHaydar commited on
Commit
bcd3268
·
verified ·
1 Parent(s): f949d6d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -9
"""Streamlit app that generates text from a user prompt with a Hugging Face
text-generation pipeline (model: mostafaHaydar/model_test)."""

import streamlit as st
from transformers import pipeline

model_name = "mostafaHaydar/model_test"  # Replace with your model's name


@st.cache_resource
def _load_generator(name: str):
    """Load the text-generation pipeline once and cache it across reruns.

    Streamlit re-executes this whole script on every widget interaction;
    without caching, the model would be reloaded on each rerun, which is
    slow and memory-hungry for an LLM.
    """
    return pipeline("text-generation", model=name)


generator = _load_generator(model_name)

# Set up the Streamlit app
st.title("Text Generation with LLaMA 3")

prompt = st.text_area("Enter your prompt:")

# Generate text when the user clicks the button
if st.button("Generate"):
    if prompt:
        # Generate text using the pipeline; the pipeline returns a list of
        # dicts, one per sequence — we request a single sequence and take it.
        generated_text = generator(
            prompt, max_length=150, num_return_sequences=1
        )[0]["generated_text"]

        # Display the generated text.
        st.subheader("Generated Text:")
        # NOTE(review): the diff view truncates right after the subheader;
        # the visible code never showed the result, so display it explicitly.
        st.write(generated_text)
    else:
        # Give the user feedback instead of silently doing nothing.
        st.warning("Please enter a prompt before generating.")