"""LeetSpeak-NER Streamlit demo.

Detects camouflaged ("leetspeak") words in English and Spanish text using
spaCy NER pipelines and renders the detected entities with annotated_text.
"""

import base64  # moved from mid-script to the top-level import group

import streamlit as st
import spacy
from streamlit_echarts import st_echarts
from annotated_text import annotated_text

st.set_page_config(
    page_title="LeetSpeak-NER",
    page_icon=":mega:",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={
        'Get Help': 'https://www.extremelycoolapp.com/help',
        'Report a bug': "https://www.extremelycoolapp.com/bug",
        'About': "# This is a header. This is an *extremely* cool app!"
    }
)


@st.cache(show_spinner=False, allow_output_mutation=True, suppress_st_warning=True)
def load_models(mode="Efficiency"):
    """Load the spaCy NER pipelines for both supported languages.

    The mode is taken as an argument (rather than read from the module
    global, as before) so that ``st.cache`` keys the cached result on it:
    otherwise switching the Efficiency/Accuracy radio button would keep
    serving the models cached for the first choice.

    Args:
        mode: "Accuracy" loads the RoBERTa transformer models,
            anything else (default "Efficiency") the lightweight toy models.

    Returns:
        dict mapping language name ("English" / "Spanish") to a loaded
        spaCy pipeline.
    """
    if mode == "Accuracy":
        spanish_model = spacy.load("./spacy-models/output_full_ES_roberta-base-bne/model-best")
        english_model = spacy.load("./spacy-models/output_full_EN_roberta_base/model-best")
    else:
        # "Efficiency" (also the fallback: the old code raised
        # UnboundLocalError if neither branch matched).
        spanish_model = spacy.load("./spacy-models/toy_output_es_blank/model-best")
        english_model = spacy.load("./spacy-models/toy_output_en_blank/model-best/")
    return {"English": english_model, "Spanish": spanish_model}


# Entity labels emitted by the NER models, mapped to their highlight colours.
ENTITY_COLORS = {
    "INV_CAMO": "#faa",
    "LEETSPEAK": "#fda",
    "MIX": "#afa",
    "PUNCT_CAMO": "#aaaaff",
}


def process_text(doc, selected_multi_ner):
    """Convert a spaCy Doc into fragments for ``annotated_text``.

    Args:
        doc: a spaCy Doc (any iterable of tokens exposing ``.text`` and
            ``.ent_type_`` works).
        selected_multi_ner: "Yes" keeps the per-type label and colour for
            each entity; any other value collapses every entity into a
            single "CAMOUFLAGE" tag.

    Returns:
        list where entity tokens are ``(text, label, colour)`` tuples and
        plain tokens are space-padded strings.
    """
    tokens = []
    for token in doc:
        label = token.ent_type_
        if label not in ENTITY_COLORS:
            tokens.append(" " + token.text + " ")
        elif selected_multi_ner == "Yes":
            tokens.append((token.text, label, ENTITY_COLORS[label]))
        else:
            tokens.append((token.text, "CAMOUFLAGE", "#ffd5aa"))
    return tokens


# ---------------------------------------------------------------- sidebar
selected_language = st.sidebar.selectbox("Select a language", options=["English", "Spanish"])
selected_multi_ner = st.sidebar.radio('Do you want to break down the Entities detected by type of leetspeak?',
                                      ['Yes', 'No'])
selected_for = st.sidebar.radio('Select for:',
                                ['Efficiency', 'Accuracy'])

# Pass the mode explicitly so the cache invalidates when it changes.
models = load_models(selected_for)
selected_model = models[selected_language]

# ---------------------------------------------------------------- header
# NOTE(review): LOGO_IMAGE and base64 are currently unused — the inline HTML
# that presumably embedded the logo appears to have been stripped from the
# markdown blocks below; confirm against the project repository.
LOGO_IMAGE = "LeetSpeak-NER-cropped.png"

st.markdown(
    """ """,
    unsafe_allow_html=True
)

col1, col2 = st.columns([2, 2])
with col1:
    st.markdown(
        f""" """,
        unsafe_allow_html=True
    )
with col2:
    st.markdown(
        f""" """,
        unsafe_allow_html=True
    )

st.markdown(""" """, unsafe_allow_html=True)
st.markdown('Welcome to LeetSpeak-NER', unsafe_allow_html=True)

with st.expander("Project Description", expanded=False):
    st.write("""
    Developed in Applied Intelligence and Data Analysis ([AI+DA](http://aida.etsisi.upm.es/)) group at Polytech University of Madrid (UPM).
    This tool uses a Spacy-Transformer Name Entity Recognition model to detect the presence of word camouflaged.
    Word camouflage is currently used to evade content moderation in Social Media.
    Therefore, the aim of this tool is to counter new ways of misinformation that emerge in social media platforms.
    Currently, two languages are supported: English and Spanish.
    Additionally, you can select whether the detected entities are broken down into the three types of camouflaged words:
    Canonical Leetspeak, Punctuation Camouflaged, Inversion Camouflaged.
    """)

with st.expander("Try some of these examples", expanded=False):
    st.write("""
    ENGLISH:
    - Desperately dominated by fam1ly sitüatløns, he leaves her.
    - You might as well come out to investigate a strang3 n'o?i+se or something.
    - But one other thing that we have to re;think is the way that we dy£ our #c!l.o|th?£+s.
    - And he wanted Baltimore to get that same kind of att£ntløn from the outside, but )i)n)t)r)o)s)p)e)c)t)i)o)n from the inside about what was going on with us.

    SPANISH
    - _d+i%o"s mío!
    - se asocian con el m13;d0 y el d'o'lor. g£rønlmo solía decir
    - Con las nuevas tecnologías digitales, los agrlcultør£s pueden manejar mejor el uso de sus tierras, su energía y su agua, y prepararse para el mal clima.
    - En el tiempo transcurrido entre mi período de escuela %s%3%c%_%n%d%a%r%1%a y el mo'm3n'to de empezar a enseñar vimos surgir el fenómeno de in't£r'net
    - Las pre0c_pac1on3s van desde inquietudes por las ramificaciones desestabilizadoras de una estrategia de salida de la FC, hasta aprehensión por pérdidas de capital en la rápidamente creciente cartera de valores de la Fed (actualmente de $3 billones y en camino a los $4 billones para finales de este año).
    """)
    # - Why do all these _r_e_p_o_r_t_e_r_s, who get praise and money for doing what Assange has done, maintain a cow;ardly silence (at best) while a fellow publisher faces threats of extradition, banning, and espionage charges (which can incur the death penalty), not to mention calls for his as'sa'ss1nat'i'on?
    # - Cada uno de estos es un crimen de guerra, un crimen contra la humanidad y, en el caso de los asesinatos masivos de la campaña de Anfal, y tal vez también en el caso de los árabes de los pantanos, el crimen más serio de todos, ge'no'ci'dio.
    # - No quiere decir que debamos iniciar una campaña por los derechos de los lns£ctøs

# ---------------------------------------------------------------- input form
st.subheader("Input Text")
with st.form("my_form"):
    text_input = st.text_area(
        'Insert a text to detect leetspeak entities. Try for example: "@#plan#demia, pl@πd€m1∆ instead of “pandemia” (pandemic)"',
    )
    uploaded_file = st.file_uploader("or Upload a file", type=["doc", "docx", "pdf", "txt"])
    if uploaded_file is not None:
        # The upload, when present, replaces whatever was typed above.
        # NOTE(review): binary formats (doc/docx/pdf) will raise
        # UnicodeDecodeError here — only plain-text uploads actually work.
        text_input = uploaded_file.getvalue()
        text_input = text_input.decode("utf-8")
    # Every form must have a submit button.
    submitted = st.form_submit_button("Submit")

# ---------------------------------------------------------------- output
st.subheader("Output")
with st.spinner('Wait for it...'):
    doc = selected_model(text_input)
    tokens = process_text(doc, selected_multi_ner)
    annotated_text(*tokens)