import streamlit as st
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import pipeline, AutoModel, AutoTokenizer
from huggingface_hub import PyTorchModelHubMixin

# Shared BERT encoder and tokenizer reused by every moral-foundation classifier
bert_model = AutoModel.from_pretrained("bert-base-uncased")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
class MyModel(
    nn.Module,
    PyTorchModelHubMixin,
    # optionally, you can add metadata which gets pushed to the model card
    # repo_url="your-repo-url",
    pipeline_tag="text-classification",
    license="mit",
):
    def __init__(self, bert_model, moral_label=2):
        super(MyModel, self).__init__()
        self.bert = bert_model
        bert_dim = 768
        self.invariant_trans = nn.Linear(bert_dim, bert_dim)
        self.moral_classification = nn.Sequential(
            nn.Linear(bert_dim, bert_dim),
            nn.ReLU(),
            nn.Linear(bert_dim, moral_label),
        )

    def forward(self, input_ids, token_type_ids, attention_mask):
        # Use the [CLS] token embedding as the pooled sentence representation
        pooled_output = self.bert(
            input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
        ).last_hidden_state[:, 0, :]
        pooled_output = self.invariant_trans(pooled_output)
        logits = self.moral_classification(pooled_output)
        return logits
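
# A brief note on the class above: it wraps the shared BERT encoder, applies a
# 768 -> 768 "invariant" linear layer to the [CLS] embedding, and maps it to two
# logits per sentence (interpreted later as foundation absent / present). Because
# MyModel mixes in PyTorchModelHubMixin, MyModel.from_pretrained(repo_name,
# bert_model=bert_model) rebuilds the class around the locally loaded encoder and
# then loads the fine-tuned weights stored in the corresponding Hub repo; this is
# the loading path used in get_model_score below.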
def preprocessing(input_text, tokenizer):
    '''
    Returns a batch encoding with the following fields:
      - input_ids: list of token ids
      - token_type_ids: list of token type ids
      - attention_mask: list of indices (0, 1) specifying which tokens should
        be considered by the model (return_attention_mask = True).
    '''
    return tokenizer(
        input_text,
        add_special_tokens=True,
        max_length=150,
        padding='max_length',
        return_attention_mask=True,
        return_token_type_ids=True,
        return_tensors='pt',
        truncation=True,
    )
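
# A rough sketch of what preprocessing() returns for a batch of two sentences
# (shapes follow from max_length=150 and return_tensors='pt'; the example
# sentences are hypothetical):
#
#   >>> enc = preprocessing(["I helped my neighbour", "He betrayed his team"], tokenizer)
#   >>> enc["input_ids"].shape        # torch.Size([2, 150])
#   >>> enc["token_type_ids"].shape   # torch.Size([2, 150])
#   >>> enc["attention_mask"].shape   # torch.Size([2, 150])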
def convert_excel_to_csv(file):
    # Despite the name, this simply reads the uploaded Excel file into a DataFrame
    return pd.read_excel(file)
# Load a per-foundation model from the Hugging Face Hub and score a batch of sentences
@st.cache_resource
def get_model_score(sentence, mft):
    repo_name = f"vjosap/moralBERT-predict-{mft}-in-{model_type}"

    # loading the model
    model = MyModel.from_pretrained(repo_name, bert_model=bert_model)

    # preprocessing the text
    encodeds = preprocessing(sentence, tokenizer)

    # predicting the mft score
    output = model(**encodeds)
    score = F.softmax(output, dim=1)

    # extract and return, for each sentence, the probability that the foundation is present
    mft_value = score[:, 1].tolist()
    return mft_value
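
# Interpreting the score (a sketch, assuming the Hub checkpoints follow the
# two-class layout defined above: column 0 = foundation absent, column 1 = present).
# Softmax over dim=1 turns the two logits per sentence into probabilities that sum
# to 1, and score[:, 1] keeps only the "present" probability for each row:
#
#   >>> get_model_score(["We must protect the weak and vulnerable"], "care")
#   [0.93]   # hypothetical value; the real number depends on the checkpoint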
# Generic text-classification pipeline loader (not used in the current app flow)
@st.cache_resource
def load_model(model_name):
    return pipeline("text-classification", model=model_name, return_all_scores=True)
def set_custom_theme():
    st.markdown("""
        <style>
        :root {
            --primary-color: #FF4B4B;
            --background-color: #FFFFFF;
            --secondary-background-color: #F0F2F6;
            --text-color: #262730;
            --font: sans-serif;
        }
        </style>
    """, unsafe_allow_html=True)
# Apply custom theme
set_custom_theme()
# Existing page element style
page_element = """
<style>
[data-testid="stAppViewContainer"] {
    background-image: url("https://images.unsplash.com/photo-1656274404439-b8b463c73194?q=80&w=1992&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D");
    background-size: cover;
}
[data-testid="stHeader"] {
    background-color: rgba(0, 0, 0, 0);
}
[data-testid="stToolbar"] {
    right: 2rem;
    background-image: url("https://img.freepik.com/premium-vector/burger-icon-isolated-illustration_92753-2926.jpg?w=2000");
    background-size: cover;
}
body {
    color: black !important; /* Set font color to black */
}
</style>
"""
# Add the style to your Streamlit app
st.markdown(page_element, unsafe_allow_html=True)
# Page title and introduction
st.title("Moral Values Detection App")
st.markdown("Authors: [Vjosa Preniqi](https://scholar.google.com/citations?user=CLZ3LL4AAAAJ&hl=en) and [Iacopo Ghinassi](https://scholar.google.com/citations?user=ANXW5EAAAAAJ&hl=en).")
st.header("Introduction", divider="red")
st.markdown("""This app implements the models described in the papers [MoralBERT: A Fine-Tuned Language Model for Capturing Moral Values in Social Discussions](https://dl.acm.org/doi/abs/10.1145/3677525.3678694) and [Automatic Detection of Moral Values in Music Lyrics](https://arxiv.org/abs/2407.18787). With this app, you can automatically predict and label text or music lyrics with 10 moral categories. To use it, upload a CSV or Excel file with a single column named "text" (for regular text) or "lyrics" (for music lyrics), where each row holds one text or lyric you want to analyse. Keep in mind that processing may take up to 10 seconds per entry, and that the models work best with social media content and music lyrics. Once the analysis is done, the app shows a table with the predicted probabilities for each moral value, which you can download using the provided button. Note that some moral values might have consistently low probabilities; if that's the case, you may need to use a lower threshold to identify them (see the original papers for more details).""")
model_type = st.radio("Select the content type to choose the appropriate prediction model", ('text', 'lyrics'))
# Warning if model type is not selected before file upload
if not model_type:
st.warning("Please pick the content type for prediction first.")
# File upload section
st.write(f"Upload a CSV or Excel file with a column named 'text' or 'lyrics' based on the model type you chose.")
uploaded_file = st.file_uploader("Choose a CSV or Excel file", type=["csv", "xlsx"])
if uploaded_file is not None:
    # Read the uploaded file into a DataFrame (CSV directly, Excel via pandas)
    file_extension = uploaded_file.name.split('.')[-1].lower()
    if file_extension == "csv":
        df = pd.read_csv(uploaded_file)
    elif file_extension == "xlsx":
        df = convert_excel_to_csv(uploaded_file)

    # Validation: check that the expected column exists for the selected model type
    expected_column = 'lyrics' if model_type == 'lyrics' else 'text'
    if expected_column not in df.columns:
        st.error(f"Please upload a file with a '{expected_column}' column since you selected '{model_type}' for prediction.")
    else:
        textual_content = df[expected_column].tolist()

        # List of moral foundation models
        models = ["care", "harm", "fairness", "cheating", "loyalty", "betrayal", "authority", "subversion", "purity", "degradation"]

        # Initialize the dataframe to store results
        result_df = pd.DataFrame(textual_content, columns=[expected_column])
        batch_size = 64
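
        # Batching sketch: with, say, 150 rows and batch_size = 64, the loop below
        # scores rows 0-63 and 64-127 as full batches, and the remainder block then
        # scores rows 128-149. The numbers are only an illustration; the actual
        # counts depend on the uploaded file.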
        # Perform predictions with each per-foundation model
        for model_name in models:
            st.write(f"Processing with model: {model_name}")
            probabilities = []

            # Process full batches
            for idx in range(len(textual_content) // batch_size):
                preds = get_model_score(textual_content[idx * batch_size:(idx + 1) * batch_size], model_name)
                probabilities.extend(preds)

            # Handle the remainder if the number of rows isn't a multiple of batch_size
            remainder_start = (len(textual_content) // batch_size) * batch_size
            if len(textual_content) % batch_size:
                preds = get_model_score(textual_content[remainder_start:], model_name)
                probabilities.extend(preds)

            # Add the results to the dataframe
            result_df[model_name] = probabilities
        # Display the dataframe
        st.dataframe(result_df)

        # Button to download the dataframe as CSV
        @st.cache_data
        def convert_df(df):
            return df.to_csv(index=False).encode('utf-8')

        csv = convert_df(result_df)

        st.download_button(
            label="Download Results as CSV",
            data=csv,
            file_name='predictions.csv',
            mime='text/csv',
        )