tweetpie committed on
Commit 9235cae · 1 Parent(s): 3ddf306

- prints fix

Files changed (1)
app.py +16 -16
app.py CHANGED
@@ -60,29 +60,29 @@ if generate_button:
     )
     )
 
+    # Displaying the input and model's output
+    st.write(f"Generated Tweet: {generated_tweet[0]['generated_text']}")
+
     with st.spinner('Generating the Stance-Aware ABSA output...'):
         # Call the model with the aspects inputs
         absa_output = absa(absa_prompt.format(generated_tweet=generated_tweet[0]['generated_text']))
         stances = [x.strip() for x in absa_output[0]['generated_text'].split(',')]
 
+    st.write("Stance-Aware ABSA Output:")
+    st.write(f"{stances}")
+
     with st.spinner('Classifying the toxicity...'):
         # Call the model with the input text
         model_output = classifier(generated_tweet[0]['generated_text'])
         output = model_output[0]
 
-    # Displaying the input and model's output
-    st.write(f"Generated Tweet: {generated_tweet[0]['generated_text']}")
-
-    st.write("Stance-Aware ABSA Output:")
-    st.write(f"{stances}")
-
-    st.write("Toxicity Classifier Output:")
-    for i in range(3):
-        if output[i]['label'] == 'LABEL_0':
-            # st.write(f"Non-Toxic Content: {output[i]['score']*100:.1f}%")
-            print(f"Non-Toxic Content: {output[i]['score']*100:.1f}%")
-        elif output[i]['label'] == 'LABEL_1':
-            # st.write(f"Toxic Content: {output[i]['score']*100:.1f}%")
-            print(f"Toxic Content: {output[i]['score']*100:.1f}%")
-        else:
-            continue
+    st.write("Toxicity Classifier Output:")
+    for i in range(3):
+        if output[i]['label'] == 'LABEL_0':
+            # st.write(f"Non-Toxic Content: {output[i]['score']*100:.1f}%")
+            print(f"Non-Toxic Content: {output[i]['score']*100:.1f}%")
+        elif output[i]['label'] == 'LABEL_1':
+            # st.write(f"Toxic Content: {output[i]['score']*100:.1f}%")
+            print(f"Toxic Content: {output[i]['score']*100:.1f}%")
+        else:
+            continue
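
For reference, below is a minimal consolidated sketch of how the block inside `if generate_button:` reads after this hunk is applied. It assumes the rest of app.py (outside this hunk) already imports streamlit as `st` and defines the `absa`, `classifier`, and `absa_prompt` objects plus the `generated_tweet` result; the comments about where `print` output goes are editorial notes, not part of the commit.

    # Sketch of the post-change flow (assumes st, absa, classifier,
    # absa_prompt, and generated_tweet come from earlier in app.py).

    # Show the generated tweet as soon as it is available.
    st.write(f"Generated Tweet: {generated_tweet[0]['generated_text']}")

    with st.spinner('Generating the Stance-Aware ABSA output...'):
        # Run the stance-aware ABSA model on the generated tweet.
        absa_output = absa(absa_prompt.format(generated_tweet=generated_tweet[0]['generated_text']))
        stances = [x.strip() for x in absa_output[0]['generated_text'].split(',')]

    st.write("Stance-Aware ABSA Output:")
    st.write(f"{stances}")

    with st.spinner('Classifying the toxicity...'):
        # Run the toxicity classifier on the same generated text.
        model_output = classifier(generated_tweet[0]['generated_text'])
        output = model_output[0]

    st.write("Toxicity Classifier Output:")
    for i in range(3):
        if output[i]['label'] == 'LABEL_0':
            # print() writes to the server console/logs; the commented-out
            # st.write() call would render the score on the Streamlit page.
            # st.write(f"Non-Toxic Content: {output[i]['score']*100:.1f}%")
            print(f"Non-Toxic Content: {output[i]['score']*100:.1f}%")
        elif output[i]['label'] == 'LABEL_1':
            # st.write(f"Toxic Content: {output[i]['score']*100:.1f}%")
            print(f"Toxic Content: {output[i]['score']*100:.1f}%")
        else:
            continue

The effect of the reordering is that each result is displayed right after the spinner block that produced it, so the generated tweet and the ABSA stances appear in the UI as soon as they are ready instead of all at once at the end.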