- use models
Browse files
app.py
CHANGED
@@ -8,6 +8,16 @@ from constants import tweet_generator_prompt, absa_prompt
|
|
8 |
# Adjust the layout for wider containers
|
9 |
st.set_page_config(layout="wide")
|
10 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
# Set up the title
|
12 |
st.title("Towards a Programmable Humanizing AI through Scalable Stance-Directed Architecture Dashboard")
|
13 |
|
@@ -40,23 +50,30 @@ with col12:
|
|
40 |
generate_button = st.button("Generate tweet and classify toxicity")
|
41 |
|
42 |
# Load the model (commented out, assuming model loading is handled elsewhere)
|
43 |
-
|
44 |
|
45 |
# Process the input text and generate output
|
46 |
if generate_button:
|
47 |
with col23: # This block is for displaying outputs in the wider column
|
48 |
with st.spinner('Generating the tweet...'):
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
53 |
|
54 |
# Displaying the input and model's output
|
55 |
-
st.write(f"Generated Tweet: {generated_tweet[0]['generated_text']}")
|
|
|
|
|
56 |
|
57 |
with st.spinner('Generating the Stance-Aware ABSA output...'):
|
58 |
-
|
59 |
-
absa_output = [{'generated_text': 'migrants:positive, rights:positive, laws:positive, safety:positive'}]
|
60 |
|
61 |
stances = [x.strip() for x in absa_output[0]['generated_text'].split(',')]
|
62 |
stances = [{
|
@@ -69,10 +86,7 @@ if generate_button:
|
|
69 |
st.table(stances_df)
|
70 |
|
71 |
with st.spinner('Classifying the toxicity...'):
|
72 |
-
|
73 |
-
model_output = [[{'label': 'LABEL_0', 'score': 0.9999998807907104},
|
74 |
-
{'label': 'LABEL_1', 'score': 1.1919785395889282e-07},
|
75 |
-
{'label': 'LABEL_2', 'score': 1.1919785395889282e-07}]]
|
76 |
output = model_output[0]
|
77 |
|
78 |
st.write("Toxicity Classifier Output:")
|
|
|
8 |
# Adjust the layout for wider containers
|
9 |
st.set_page_config(layout="wide")
|
10 |
|
11 |
+
|
12 |
+
# Cache the pipelines as shared resources so Streamlit does not reload the
# models from disk/hub on every widget interaction (each rerun re-executes
# this script top to bottom, and load_model() is called unconditionally).
@st.cache_resource
def load_model():
    """Load and return the three Hugging Face pipelines used by the dashboard.

    Returns:
        tuple: (classification_pipe, absa_pipe, tweet_generation_pipe)
            - classification_pipe: text-classification pipeline for toxicity
              detection; top_k=None makes it return scores for every label,
              not just the best one.
            - absa_pipe: text2text-generation pipeline for stance-aware
              aspect-based sentiment analysis.
            - tweet_generation_pipe: text2text-generation pipeline for
              stance-directed tweet generation.
    """
    classification_pipe = pipeline(
        "text-classification", model="tweetpie/toxic-content-detector", top_k=None)
    absa_pipe = pipeline("text2text-generation", model="tweetpie/stance-aware-absa")
    tweet_generation_pipe = pipeline("text2text-generation", model="tweetpie/stance-directed-tweet-generator")
    return classification_pipe, absa_pipe, tweet_generation_pipe
|
19 |
+
|
20 |
+
|
21 |
# Set up the title
|
22 |
st.title("Towards a Programmable Humanizing AI through Scalable Stance-Directed Architecture Dashboard")
|
23 |
|
|
|
50 |
generate_button = st.button("Generate tweet and classify toxicity")
|
51 |
|
52 |
# Load the three pipelines: toxicity classifier, stance-aware ABSA, and the
# stance-directed tweet generator.
# NOTE(review): this line runs on every Streamlit rerun; it is only cheap if
# load_model() is cached (e.g. via @st.cache_resource) — confirm.
classifier, absa, generator = load_model()
|
54 |
|
55 |
# Process the input text and generate output
if generate_button:
    with col23:  # This block is for displaying outputs in the wider column
        with st.spinner('Generating the tweet...'):
            # Fill the prompt template with the selected ideology and the
            # pro/anti/neutral entity and aspect inputs gathered elsewhere
            # in the app (widgets not visible in this chunk).
            prompt = tweet_generator_prompt.format(
                ideology=model_selection.lower(),
                pro_entities=pro_entities,
                anti_entities=anti_entities,
                neutral_entities=neutral_entities,
                pro_aspects=pro_aspects,
                anti_aspects=anti_aspects,
                neutral_aspects=neutral_aspects
            )
            # Sample three candidate tweets; the pipeline returns a list of
            # dicts, each carrying the text under 'generated_text'.
            generated_tweet = generator(prompt, max_new_tokens=80, do_sample=True, num_return_sequences=3)

        # Displaying the input and model's output
        st.write(f"Generated Tweet-1: {generated_tweet[0]['generated_text']}")
        st.write(f"Generated Tweet-2: {generated_tweet[1]['generated_text']}")
        st.write(f"Generated Tweet-3: {generated_tweet[2]['generated_text']}")

        with st.spinner('Generating the Stance-Aware ABSA output...'):
            # Only the first candidate tweet is passed through ABSA; the
            # other two are display-only.
            absa_output = absa(absa_prompt.format(generated_tweet=generated_tweet[0]['generated_text']))
|
|
|
77 |
|
78 |
stances = [x.strip() for x in absa_output[0]['generated_text'].split(',')]
|
79 |
stances = [{
|
|
|
86 |
st.table(stances_df)
|
87 |
|
88 |
with st.spinner('Classifying the toxicity...'):
|
89 |
+
model_output = classifier(generated_tweet[0]['generated_text'])
|
|
|
|
|
|
|
90 |
output = model_output[0]
|
91 |
|
92 |
st.write("Toxicity Classifier Output:")
|