eduardo-meik committed
Commit 6d8e32f
Parent: fd69c9d

Update app.py

Files changed (1)
app.py +45 -13
app.py CHANGED
@@ -1,12 +1,22 @@
  import streamlit as st
- from transformers import pipeline
+ from transformers import pipeline, RobertaTokenizerFast, TFRobertaForSequenceClassification, AutoTokenizer, AutoModelForSequenceClassification

  # Sentiment Analysis Pipeline
- pipe = pipeline('sentiment-analysis')
+ sentiment_pipe = pipeline('sentiment-analysis')

  # Toxicity Classifier
- model_path = "citizenlab/distilbert-base-multilingual-cased-toxicity"
- toxicity_classifier = pipeline("text-classification", model=model_path, tokenizer=model_path)
+ model_path_toxic = "citizenlab/distilbert-base-multilingual-cased-toxicity"
+ toxicity_classifier = pipeline("text-classification", model=model_path_toxic, tokenizer=model_path_toxic)
+
+ # Emotion Analysis
+ tokenizer_emotion = RobertaTokenizerFast.from_pretrained("arpanghoshal/EmoRoBERTa")
+ model_emotion = TFRobertaForSequenceClassification.from_pretrained("arpanghoshal/EmoRoBERTa")
+ emotion = pipeline('sentiment-analysis', model=model_emotion, tokenizer=tokenizer_emotion)
+
+ # User Needs Analysis
+ tokenizer_needs = AutoTokenizer.from_pretrained("thusken/nb-bert-base-user-needs")
+ model_needs = AutoModelForSequenceClassification.from_pretrained("thusken/nb-bert-base-user-needs")
+ user_needs = pipeline('text-classification', model=model_needs, tokenizer=tokenizer_needs)

  st.title("Plataforma de Diálogos Participativos")

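A side note on this hunk: Streamlit re-runs the entire script on every widget interaction, so loading four checkpoints at module level means they are re-created on each rerun unless cached. A minimal sketch of one way to cache the heavy objects with st.cache_resource (the decorator exists in Streamlit >= 1.18; the load_toxicity helper name is illustrative, not from the commit):

import streamlit as st
from transformers import pipeline

@st.cache_resource  # runs once per process; later reruns reuse the returned object
def load_toxicity():
    # Same checkpoint the commit wires up; only one pipeline shown for brevity
    path = "citizenlab/distilbert-base-multilingual-cased-toxicity"
    return pipeline("text-classification", model=path, tokenizer=path)

toxicity_classifier = load_toxicity()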
@@ -14,31 +24,53 @@ st.title("Plataforma de Diálogos Participativos")
  text = st.text_area("Añade el texto a evaluar")

  # Create columns for buttons
- col1, col2 = st.columns(2)
+ col1, col2, col3, col4 = st.columns(4)

- # Place each button in a separate column to make them appear on the same row
+ # Place each button in a separate column
  run_sentiment_analysis = col1.button("Evaluar Sentimiento")
  run_toxicity_analysis = col2.button("Evaluar Toxicidad")
+ run_emotion_analysis = col3.button("Evaluar Emoción")
+ run_user_needs_analysis = col4.button("Evaluar Necesidades del Usuario")

  # Container for output
  output_container = st.container()

- # Check if the sentiment analysis button has been pressed and if there's text in the text area
+ # Sentiment analysis
  if run_sentiment_analysis and text:
      with output_container:
-         sentiment_output = pipe(text)
-         st.write("Resultado del análisis de sentimiento:")
-         st.json(sentiment_output)
+         sentiment_output = sentiment_pipe(text)
+         label = sentiment_output[0]['label']
+         score = round(sentiment_output[0]['score'] * 100, 2)
+         st.markdown(f"**Resultado del análisis de sentimiento:**\n\n- **Etiqueta:** {label}\n- **Confianza:** {score}%")
  elif run_sentiment_analysis and not text:
      st.warning("Por favor, añade un texto para evaluar el sentimiento.")

- # Check if the toxicity analysis button has been pressed and if there's text in the text area
+ # Toxicity analysis
  if run_toxicity_analysis and text:
      with output_container:
          toxicity_output = toxicity_classifier(text)
-         st.write("Resultado del análisis de toxicidad:")
-         st.json(toxicity_output)
+         label = toxicity_output[0]['label']
+         score = round(toxicity_output[0]['score'] * 100, 2)
+         st.markdown(f"**Resultado del análisis de toxicidad:**\n\n- **Etiqueta:** {label}\n- **Confianza:** {score}%")
  elif run_toxicity_analysis and not text:
      st.warning("Por favor, añade un texto para evaluar la toxicidad.")

+ # Emotion analysis
+ if run_emotion_analysis and text:
+     with output_container:
+         emotion_output = emotion(text)
+         label = emotion_output[0]['label']
+         score = round(emotion_output[0]['score'] * 100, 2)
+         st.markdown(f"**Resultado del análisis de emoción:**\n\n- **Etiqueta:** {label}\n- **Confianza:** {score}%")
+ elif run_emotion_analysis and not text:
+     st.warning("Por favor, añade un texto para evaluar la emoción.")

+ # User needs analysis
+ if run_user_needs_analysis and text:
+     with output_container:
+         needs_output = user_needs(text)
+         label = needs_output[0]['label']
+         score = round(needs_output[0]['score'] * 100, 2)
+         st.markdown(f"**Resultado del análisis de necesidades del usuario:**\n\n- **Etiqueta:** {label}\n- **Confianza:** {score}%")
+ elif run_user_needs_analysis and not text:
+     st.warning("Por favor, añade un texto para evaluar las necesidades del usuario.")
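The new [0]['label'] / [0]['score'] indexing in this hunk follows the standard transformers text-classification output: a list with one {'label', 'score'} dict per input string. A self-contained illustration of the parsing, with hypothetical values:

# Shape of a typical pipeline result for a single input (values are made up)
result = [{'label': 'POSITIVE', 'score': 0.9837}]
label = result[0]['label']                  # 'POSITIVE'
score = round(result[0]['score'] * 100, 2)  # 98.37
print(f"Etiqueta: {label} - Confianza: {score}%")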
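Worth flagging for deployment: TFRobertaForSequenceClassification is the TensorFlow implementation, so the EmoRoBERTa pipeline needs TensorFlow installed, while the toxicity and user-needs checkpoints are normally served through PyTorch. A small illustrative check, not part of the commit, that the environment has both backends:

# Illustrative environment check for the new imports
import importlib.util

for pkg, why in [
    ("tensorflow", "TFRobertaForSequenceClassification (EmoRoBERTa) is a TensorFlow class"),
    ("torch", "the toxicity and user-needs pipelines load PyTorch weights by default"),
]:
    ok = importlib.util.find_spec(pkg) is not None
    print(f"{pkg}: {'installed' if ok else 'missing'} - {why}")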