Spaces:
Sleeping
Sleeping
spedrox-sac
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -1,10 +1,9 @@
|
|
1 |
import streamlit as st
|
2 |
-
from transformers import pipeline
|
3 |
from langchain_core.output_parsers import StrOutputParser
|
4 |
|
5 |
-
# Initialize the
|
6 |
model_name = "Qwen/Qwen2.5-0.5B-Instruct"
|
7 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
|
8 |
pipe = pipeline("text-generation", model=model_name, device=-1)
|
9 |
parser = StrOutputParser()
|
10 |
|
@@ -20,11 +19,8 @@ if st.button("Generate"):
|
|
20 |
user_messages = user_input.splitlines()
|
21 |
messages = [message.strip() for message in user_messages if message.strip()]
|
22 |
|
23 |
-
#
|
24 |
-
|
25 |
-
|
26 |
-
# Process tokenized messages in a batch
|
27 |
-
outputs = [pipe(input_ids=tokenized_input['input_ids'], max_new_tokens=50)[0] for tokenized_input in tokenized_inputs]
|
28 |
|
29 |
# Display the generated text for each input message
|
30 |
st.write("Generated Responses:")
|
|
|
1 |
import streamlit as st
|
2 |
+
from transformers import pipeline
|
3 |
from langchain_core.output_parsers import StrOutputParser
|
4 |
|
5 |
+
# Initialize the text generation pipeline
|
6 |
model_name = "Qwen/Qwen2.5-0.5B-Instruct"
|
|
|
7 |
pipe = pipeline("text-generation", model=model_name, device=-1)
|
8 |
parser = StrOutputParser()
|
9 |
|
|
|
19 |
user_messages = user_input.splitlines()
|
20 |
messages = [message.strip() for message in user_messages if message.strip()]
|
21 |
|
22 |
+
# Process messages in a batch
|
23 |
+
outputs = pipe(messages, max_new_tokens=50) # Adjust max_new_tokens as needed
|
|
|
|
|
|
|
24 |
|
25 |
# Display the generated text for each input message
|
26 |
st.write("Generated Responses:")
|