Ariel Hsieh committed
Commit
b2c3df8
1 Parent(s): 0afd711

update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -6,7 +6,7 @@ import numpy as np
 import pandas as pd

 #title
-st.title("Toxic Tweets")
+st.title("Toxic Tweet Classification / Sentiment Analysis")

 selection = st.selectbox("Select fine-tuned model",("Ariel8/toxic-tweets-classification","roberta-large-mnli","twitter-XLM-roBERTa-base"))

@@ -25,7 +25,7 @@ if selection == "Ariel8/toxic-tweets-classification":
 "How dare you vandalize that page about the HMS Beagle! Don't vandalize again, demon!",
 ":Thanks for the comment about Wiki-defenderness. I like that one. I usually wikiling Wiki-defender. I agree that at first he was somewhat innocent but now have my doubts as he is being really agressive about the whole matter."]

-text = st.text_input("Enter Text here for Toxicity Classification:","I hate everything")
+text = st.text_input("Enter Text here for Toxicity Classification:","Artificial Intelligence is useful")

 if st.button("Run Toxicity Classification of Text (and prepopulated Tweets)"):
     tweets.append(text)
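
For context, below is a minimal, self-contained sketch of how the two changed lines might sit in the running Streamlit app. It is not the repository's actual app.py: these hunks do not show how the selected model is loaded, so the transformers pipeline call and the final st.write are assumptions added only for illustration.

# Hypothetical sketch around the changed lines; not the repo's actual app.py.
import streamlit as st
from transformers import pipeline  # assumed dependency; model loading is not shown in this diff

# New page title introduced by this commit
st.title("Toxic Tweet Classification / Sentiment Analysis")

selection = st.selectbox(
    "Select fine-tuned model",
    ("Ariel8/toxic-tweets-classification", "roberta-large-mnli", "twitter-XLM-roBERTa-base"),
)

# New, neutral default text introduced by this commit
text = st.text_input(
    "Enter Text here for Toxicity Classification:",
    "Artificial Intelligence is useful",
)

if st.button("Run Toxicity Classification of Text (and prepopulated Tweets)"):
    # Assumed loading path: the diff does not show how `selection` is turned into a model.
    classifier = pipeline("text-classification", model=selection)
    st.write(classifier(text))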