shainaraza committed on
Commit 460f5fd
1 Parent(s): f199e05

Update app.py

Files changed (1): app.py (+3 -16)
app.py CHANGED
@@ -1,7 +1,6 @@
#%%writefile debias_app.py
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForTokenClassification, pipeline
- import pandas as pd

# Define the BiasPipeline class with text processing methods
class BiasPipeline:
@@ -35,13 +34,7 @@ pipeline = BiasPipeline()

# Streamlit interface
st.title('UnBIAS App')
- # List of preloaded example sentences
- example_sentences = [
-     "Women are just too emotional to be leaders.",
-     # More examples...
- ]
-
- # Dropdown for selecting an example sentence or entering your own
+ example_sentences = ["Women are just too emotional to be leaders.", "All young people are lazy and addicted to their phones."]
selected_sentence = st.selectbox("Choose an example or type your own below:", [""] + example_sentences)
input_text = st.text_area("Enter text:", selected_sentence, height=150)

@@ -52,16 +45,10 @@ if st.button("Process Text"):
        label = classification_results[0]['label']
        score = classification_results[0]['score']
        st.write(f"**Classification:** {label} (Confidence: {score:.2f})")
-
-         # Extract biased words from NER results
        biased_words = [result['word'] for result in ner_results if result['entity'].startswith('B-BIAS')]
-         st.write("**Biased Words Identified:**")
-         st.write(", ".join(biased_words))
+         st.write("**Biased Words Identified:**", ", ".join(biased_words))
    else:
        st.write("Please enter some text to process.")

# Disclaimer
- st.info("Disclaimer: Please note that while this tool aims to identify and highlight biased language, no automated system is perfect. \
- The detection of bias depends on various factors, including the context, the training data used for the models, \
- and the inherent limitations of natural language processing technologies. As such, some biases may not be detected, \
- and all results should be reviewed critically by human users.")
+ st.info("Disclaimer: Please note that while this tool aims to identify and highlight biased language, no automated system is perfect. The detection of bias depends on various factors, including the context, the training data used for the models, and the inherent limitations of natural language processing technologies. As such, some biases may not be detected, and all results should be reviewed critically by human users.")