import streamlit as st
from transformers import pipeline

# Sentiment analysis pipeline (no model specified, so transformers falls back to its default English sentiment model)
pipe = pipeline('sentiment-analysis')

# Toxicity classifier (multilingual DistilBERT toxicity model)
model_path = "citizenlab/distilbert-base-multilingual-cased-toxicity"
toxicity_classifier = pipeline("text-classification", model=model_path, tokenizer=model_path)

st.title("Plataforma de Diálogos Participativos")

# Text area for input
text = st.text_area("Añade el texto a evaluar")

# Create columns for buttons
col1, col2 = st.columns(2)

# Place each button in a separate column to make them appear on the same row
run_sentiment_analysis = col1.button("Evaluar Sentimiento")
run_toxicity_analysis = col2.button("Evaluar Toxicidad")

# Container for output
output_container = st.container()

# Check if the sentiment analysis button has been pressed and if there's text in the text area
if run_sentiment_analysis and text:
    with output_container:
        sentiment_output = pipe(text)
        st.write("Resultado del análisis de sentimiento:")
        st.json(sentiment_output)
elif run_sentiment_analysis and not text:
    st.warning("Por favor, añade un texto para evaluar el sentimiento.")

# Check if the toxicity analysis button has been pressed and if there's text in the text area
if run_toxicity_analysis and text:
    with output_container:
        toxicity_output = toxicity_classifier(text)
        st.write("Resultado del análisis de toxicidad:")
        st.json(toxicity_output)
elif run_toxicity_analysis and not text:
    st.warning("Por favor, añade un texto para evaluar la toxicidad.")