import streamlit as st
import pandas as pd
from datasets import load_dataset
from random import sample
from utils.metric import Regard
from utils.model import gpt2
import os
# Set up the Streamlit interface
st.title('Gender Bias Analysis in Text Generation')
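

# Gate the demo behind a simple password check: the app content below only
# renders once the entered password matches the PASSWORD environment variable.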
def check_password():
    def password_entered():
        if password_input == os.getenv('PASSWORD'):
            st.session_state['password_correct'] = True
        else:
            st.error("Incorrect Password, please try again.")

    password_input = st.text_input("Enter Password:", type="password")
    submit_button = st.button("Submit", on_click=password_entered)

    if submit_button and not st.session_state.get('password_correct', False):
        st.error("Please enter a valid password to access the demo.")


if not st.session_state.get('password_correct', False):
    check_password()
else:
    st.sidebar.success("Password Verified. Proceed with the demo.")
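
    # Initialise session-state defaults: sample size, the BOLD dataset
    # (loaded once from the Hugging Face Hub), and empty male/female sample lists.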
    if 'data_size' not in st.session_state:
        st.session_state['data_size'] = 10

    if 'bold' not in st.session_state:
        st.session_state['bold'] = load_dataset("AlexaAI/bold", split="train")

    if 'female_bold' not in st.session_state:
        st.session_state['female_bold'] = []

    if 'male_bold' not in st.session_state:
        st.session_state['male_bold'] = []
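
    # Step 1: let the user pick how many BOLD prompts to sample per gender category.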
    st.subheader('Step 1: Set Data Size')
    data_size = st.slider('Select number of samples per category:', min_value=1, max_value=50,
                          value=st.session_state['data_size'])
    st.session_state['data_size'] = data_size
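
    # Draw an equal-sized random sample of prompts from the 'American_actresses'
    # and 'American_actors' categories of the BOLD dataset.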
    if st.button('Show Data'):
        st.session_state['female_bold'] = sample(
            [p for p in st.session_state['bold'] if p['category'] == 'American_actresses'], data_size)
        st.session_state['male_bold'] = sample(
            [p for p in st.session_state['bold'] if p['category'] == 'American_actors'], data_size)

        st.write(f'Sampled {data_size} prompts each for American actresses and American actors.')
        st.write('**Female Samples:**', pd.DataFrame(st.session_state['female_bold']))
        st.write('**Male Samples:**', pd.DataFrame(st.session_state['male_bold']))
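
    # Step 2: once both samples exist, generate continuations for the first prompt
    # of each sampled entry using the GPT-2 wrapper from utils.model.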
    if st.session_state['female_bold'] and st.session_state['male_bold']:
        st.subheader('Step 2: Generate Text')

        if st.button('Generate Text'):
            GPT2 = gpt2()
            st.session_state['male_prompts'] = [p['prompts'][0] for p in st.session_state['male_bold']]
            st.session_state['female_prompts'] = [p['prompts'][0] for p in st.session_state['female_bold']]

            progress_bar = st.progress(0)

            st.write('Generating text for male prompts...')
            male_generation = GPT2.text_generation(st.session_state['male_prompts'], pad_token_id=50256,
                                                   max_length=50, do_sample=False, truncation=True)
            st.session_state['male_continuations'] = [gen[0]['generated_text'].replace(prompt, '') for gen, prompt in
                                                      zip(male_generation, st.session_state['male_prompts'])]
            progress_bar.progress(50)

            st.write('Generating text for female prompts...')
            female_generation = GPT2.text_generation(st.session_state['female_prompts'], pad_token_id=50256,
                                                     max_length=50, do_sample=False, truncation=True)
            st.session_state['female_continuations'] = [gen[0]['generated_text'].replace(prompt, '') for gen, prompt in
                                                        zip(female_generation, st.session_state['female_prompts'])]
            progress_bar.progress(100)

            st.write('Text generation completed.')
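
    # Step 3: show the prompts next to their generated continuations so the user
    # can inspect the raw text before evaluation.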
    if st.session_state.get('male_continuations') and st.session_state.get('female_continuations'):
        st.subheader('Step 3: Sample Generated Texts')

        st.write("Male Data Samples:")
        samples_df = pd.DataFrame({
            'Male Prompt': st.session_state['male_prompts'],
            'Male Continuation': st.session_state['male_continuations'],
        })
        st.write(samples_df)

        st.write("Female Data Samples:")
        samples_df = pd.DataFrame({
            'Female Prompt': st.session_state['female_prompts'],
            'Female Continuation': st.session_state['female_continuations']
        })
        st.write(samples_df)
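
        # Step 4: compare regard (a measure of language polarity toward the group
        # mentioned in the text) between the male and female continuations.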
        if st.button('Evaluate'):
            st.subheader('Step 4: Regard Results')
            regard = Regard("compare")
            st.write('Computing regard results to compare male and female continuations...')

            with st.spinner('Computing regard results...'):
                regard_results = regard.compute(data=st.session_state['male_continuations'],
                                                references=st.session_state['female_continuations'])
                st.write('**Raw Regard Results:**')
                st.json(regard_results)

                regard_results_avg = regard.compute(data=st.session_state['male_continuations'],
                                                    references=st.session_state['female_continuations'],
                                                    aggregation='average')
                st.write('**Average Regard Results:**')
                st.json(regard_results_avg)
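
                # Note: assuming utils.metric.Regard wraps the Hugging Face `evaluate`
                # "regard" measurement in "compare" mode, the raw output reports the
                # per-category difference in regard (positive/negative/neutral/other)
                # between the male continuations (data) and the female continuations
                # (references), while the 'average' aggregation reports averaged scores.
                # Large asymmetries suggest gender bias in the generated text.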