Spaces: Runtime error
unaiolaizola committed
Commit · 2a25522
Parent(s): 2e36bee
Upload 10 files
Browse files
- app.py +654 -0
- autonomia.txt +4 -0
- compromiso.txt +4 -0
- data.csv +0 -0
- daw_obj.txt +18 -0
- mark_obj.txt +14 -0
- participacion.txt +4 -0
- person1.png +0 -0
- person2.png +0 -0
- requirements.txt +14 -0
app.py
ADDED
@@ -0,0 +1,654 @@
from pyChatGPT import ChatGPT
import streamlit as st
import string
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
import numpy as np
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from scipy.special import softmax
import stylecloud
from stop_words import get_stop_words
from PIL import Image
from pysentimiento import create_analyzer
import altair as alt
from sentence_transformers import SentenceTransformer

# CLEAN SENTENCES: strip punctuation character by character, then lowercase
def clean_string(text):
    if text == "nan":
        return ""
    text = ''.join([char for char in text if char not in string.punctuation])
    text = text.lower()
    return text
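# Example: clean_string("Hola, ¿qué tal?") returns "hola ¿qué tal".
# string.punctuation only covers ASCII punctuation, so inverted Spanish
# marks such as "¿" survive the cleaning step.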

# LOAD THE MODELS
roberta = "cardiffnlp/twitter-xlm-roberta-base-sentiment"
model = AutoModelForSequenceClassification.from_pretrained(roberta)
tokenizer = AutoTokenizer.from_pretrained(roberta)
modelS = SentenceTransformer('hiiamsid/sentence_similarity_spanish_es')


# READ AND CLASSIFY THE RESPONSES
data = pd.read_csv(r'data.csv')

person1_objetives, person1_difficulties, person1_utilities, person2_objetives, person2_difficulties, person2_utilities = [], [], [], [], [], []
person1_all_text, person2_all_text = [], []
for index, row in data.iterrows():
    if row["DNI"] == "72838728M":
        person1_objetives.append(str(row["objeto_si"]))
        person1_difficulties.append(str(row["objeto_no"]) + " " + str(row["que_necesito"]))
        person1_utilities.append(str(row["para_que"]) + " " + str(row["como_uso"]))
        person1_all_text.append(clean_string(str(row["objeto_si"])) + ". " + clean_string(str(row["objeto_no"])) + ". " + clean_string(str(row["que_necesito"])) + ". " + clean_string(str(row["para_que"])) + ". " + clean_string(str(row["como_uso"])))
    elif row["DNI"] == "73233278J":
        person2_objetives.append(str(row["objeto_si"]))
        person2_difficulties.append(str(row["objeto_no"]) + " " + str(row["que_necesito"]))
        person2_utilities.append(str(row["para_que"]) + " " + str(row["como_uso"]))
        person2_all_text.append(str(row["objeto_si"]) + ". " + str(row["objeto_no"]) + ". " + str(row["que_necesito"]) + ". " + str(row["para_que"]) + ". " + str(row["como_uso"]))


# WORD CLOUDS (stylecloud writes each PNG to disk; it is then reloaded with PIL)
person1_wordcloud = " ".join(person1_objetives)
person2_wordcloud = " ".join(person2_objetives)
irrelevant_words = get_stop_words("spanish")
custom_irrelevant_words = irrelevant_words[:]
custom_irrelevant_words.extend(["hacer", "realizar", "aprender", "aprendido"])

stylecloud.gen_stylecloud(text=person1_wordcloud, custom_stopwords=custom_irrelevant_words, icon_name="fas fa-circle", output_name="person1.png")
person1 = Image.open("person1.png")
stylecloud.gen_stylecloud(text=person2_wordcloud, custom_stopwords=custom_irrelevant_words, icon_name="fas fa-circle", output_name="person2.png")
person2 = Image.open("person2.png")


# READ THE OBJECTIVES, AUTONOMY, PARTICIPATION AND COMMITMENT REFERENCE TEXTS
with open('daw_obj.txt', 'r', encoding="utf8") as f1:
    objetivos1 = f1.read()
with open('mark_obj.txt', 'r', encoding="utf8") as f2:
    objetivos2 = f2.read()
with open('autonomia.txt', 'r', encoding="utf8") as f3:
    autonomia = f3.read()
with open('participacion.txt', 'r', encoding="utf8") as f4:
    participacion = f4.read()
with open('compromiso.txt', 'r', encoding="utf8") as f5:
    compromiso = f5.read()

# CHATGPT CALL (left commented out)
# session_token = "eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..1K4xDB69QDCvq957.8XFvLu5dFg23jOdjkDyT-B_LE826oFzkcnRUJmx-poHDheX45HTf0m3cKKSgRp2B6QXxMR01ELGOHb0ZdeS5TGXC_8qyl9xTX1MvvFIkxLDVEc884xroPBFJdne2d-xoQrriAkDWZQFhE87tJSLlID-BZBKgUS_leaCbxJL87_KTxBKU4F_DNI-P_RMUL8ErLNZEFVs_CISJMMQLSpPA1GDAtecSPll55_FGuoNI3iYEYT-Rro3pFBOXdJhiEgmoKvWfVoItdN8NemVtXxXHFGl3XlZUgh5F7b6LT6id2MO5y5uZv_04lkw6mSl-Bh7ziBmXw2qtQY9vGX5s2p4SKI4CduaEMZtLslZlPM0p23fnGoIt2BYC7ijSw2nwqOLnl_axGJK0Sw1Jpmy5moNRs8yQcusQ2qMPl8g3r9WIosfuaoIz8qRiiP2nSzYUwfGI3-fRzsnXC3XtN-sfeywH3TFIMWo9MvKa0mU3SWfQZ8H2PZvXAUZ7-_j8Eopz6fYxpwAImJD1gIrG2JGTKyU4Mffh8_IBAo7yt9W8T6NLdXyCT9t5536i751Ga9CW6ahTBb7f3RWPwcsNnIB7VMwxwy996uwBquHiGWua-gepZw2PsO7yEQB3xZKCdafur-MegcxcWep_qbpmGo6-8AEGKLgLKD8Ed6MS4rnrcPLKfcHAvboO7SNmBuB4-lBUltaWTPDEfiXK25OXbwpQ7qychURy9OLd4fPuYtP3gwGOVi6k1Mni2rI4oa_XAhlJTvH6MMaYZxQHVXSTNOcgLXx9cz1JTqnkmk4mRHnvlj8uoyVszXQi2EFq1ozz4bxFCiir4wYzBdCwC2bp5S--i7E89xL3RQ8DvCMKO3q3Ro3nPU8hD4QItoCHgHaxpexrtiq_4feHmVl9A4cAFEkTGyjC4ZuNT0Ety0fsM0JtytFNiTnBHGqB7ZNOSLMyjNqEs7IpnBxRlzCB5afLDG5cP3ipOIMILSVyEv2je8yWSEx2E5ogSL-inO2p-EcThnT5KxySZMmCDT25qQmLI0Gk8afm8M--c1PUd3Z2ZXx50ouqvftZyEjlTocQfoAITVIUc6cCXhEuCsIL4RuyVvz5Ps73WuM0K9MmESn8iQRddz_03MxyHHsDdGoiT97TaGz-ivOjoO2eRdzOwU5k0JbJH5xZOWSvpVg2wnYWKLv2_gOjEMrTYxx_4kSBxpeYKyiIhFKug6nuRUmRwfCksPiWjNEVLjPo_x0_K_thH0jii76WQcq-224VibB3hAMTMxdr7aLVwqPJNVHOVI0Log4vJcledthlzilRGiw4kNBOYqo95DyYjXZJ5haKnUdsQrb9pwCBmXeK0PFxssZ904Wwpd22tH9w5ZvJZCz5p39tniakd9UeOHaPQmY3N26jzXfV4h1w3lkdzjrBEEMwxuUFjaaolQE6GKpswRiDdHZm4mbGOHkQYeMYebEVhy17r9drLvTc4QrlwNuP8HA4vfgQTUvqo64QM6RvIvqHftawkVazxNYUEhTmWsuUemZXI-GHLTbDrfD9BafGI9yk3hp8bG2u8R9ZvPZAA4R1wkBCQiY1BumfaGN49ETuQRJF0HTf4mRVJ9b6BSbgEO7tzAN2adQ0T22ePh2FkUqmOmDHHp_QwPFja5NCfLebcLyBLqxDkdcLANfpS0g-BkraV0ZpuU4_SZrb3Qu1Et8tnF5coVJwZWm6A-1PHhClHpf3KEz5F5MVfkAPCAiSMPqD8UaTCGWbMY7CHUav9IyuX7-uoAOzz9ZyoUQuDC9-OGHQb8x15XzsNWffCRpJwjWMBTRBB_rw5HwXuWBBWk-GWzXZtSHhWRXhouRhoUjKloqhgUfeNmL1gg4lusel5NQF6phwVek1V3oJknO1XezLjyVeio69_OOzkqosSkHs2ZskisqnFfL--LG0m5TO9-o88OYESeZIO4tQuUSN8HYxyqtaWo0iiJHxMDumo8fJypiR5z5L13aGrNA8ZPm2S_tg-Mmz34wqgLihFhgDRMMqu87dYXrF78oz3uKbYmhYsCk-jY519yUgwOfiC3CrfG6LqTcbWCVbmh-yogcstCV-nLfTeosGIZUHNI_H5didKNzh6hzUjoYHUhpiCDFD7mM89lm5EoeEa3S1ZNoAhO-4QXYIA5AajgxtES9SX7hPfP94zh9rm-l__9eJkWg7KOblWBYTq1eT71FxtN9fEIJZQ7pa9h1UEvgJL8aP2EJ3yWY5KFZ6GGcKpFf9x_omFxDxF8AdQ5n9uSKNcK0I8wz43FF8HRbWYWumUV3n7GpEZS7hGgB7ynRkYZ-X6ZtQj9VADJtUojMcAwYM7KdCqO1FBBKOd8McBirXkjzYfS3-LEjeYCdu67ogQYF3toxzR3Xhc_rcfcHnHh601I4opVb9mZBiws_d31146_CqJ-r5NF1PDnm9JfOURLua2ySMDd2B13uzP3L0BFveLR2Dq62BO2grKLEbn97__2HtESrVp-ozDqc9yLHSxhPjmWUAy9xCPDBlQNt8KrA.FxxuIKliXcdC8v7F1lX9sQ"
# api = ChatGPT(session_token)
# text = api.send_message(f'objetivos y competencias en un grado superior de desarrollo de aplicaciones web')


# SPANISH STOP WORDS (this rebinding shadows the unused nltk import above)
stopwords = get_stop_words('spanish')



# COSINE SIMILARITY
def cosine_sim_vectors(vec1, vec2):
    vec1 = vec1.reshape(1, -1)
    vec2 = vec2.reshape(1, -1)

    return cosine_similarity(vec1, vec2)[0][0]
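# For example, cosine_sim_vectors(np.array([1.0, 0.0]), np.array([1.0, 1.0]))
# returns about 0.7071, the cosine of the 45-degree angle between the vectors:
# (1*1 + 0*1) / (1 * sqrt(2)).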


# RUN ON PERSON 1'S ANSWERS
# objectives: similarity of each weekly answer to the official course objectives
person1_objetives.append(objetivos1)
cleaned2 = list(map(clean_string, person1_objetives))
embeddings = modelS.encode(cleaned2)
aut = []
for idx, answer in enumerate(cleaned2[:-1]):
    aut.append(cosine_similarity([embeddings[-1]], [embeddings[idx]]))
similarities1 = []
for answer in aut:
    similarities1.append(float(answer[0][0]))

index = [*range(0, len(similarities1), 1)]

chart_objetives1 = pd.DataFrame({
    'x': index,
    'y': similarities1
})
o1 = alt.Chart(chart_objetives1).mark_area().encode(
    x=alt.X('x', title="Semanas"),
    y=alt.Y('y', title=""),
    color=alt.value("#3399ff")
)

# difficulties: negative/neutral/positive sentiment scores per answer
difficulties1 = []
cleanedD1 = list(map(clean_string, person1_difficulties))
for idx, answer in enumerate(cleanedD1):
    encoded_text1 = tokenizer(answer, return_tensors='pt')
    output1 = model(**encoded_text1)
    scores1 = output1[0][0].detach().numpy()
    scores1 = softmax(scores1)
    difficulties1.append(scores1)
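# The tokenize -> model -> softmax loop above is repeated three more times in
# this file (usefulness, and both loops for person 2). A helper along these
# lines could replace the copies; it is only a sketch and is not called below.
def sentiment_scores(texts):
    """Return [negative, neutral, positive] softmax scores for each text."""
    all_scores = []
    for text in texts:
        encoded = tokenizer(text, return_tensors='pt')
        output = model(**encoded)
        all_scores.append(softmax(output[0][0].detach().numpy()))
    return all_scores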

color_scale = alt.Scale(
    domain=["positivo", "neutral", "negativo"],
    range=["#33cc33", "#6699ff", "#ff0000"]
)

y_axis = alt.Axis(
    title='Semanas',
    offset=5,
    ticks=False,
    minExtent=60,
    domain=False
)
source1 = []

for idx, d in enumerate(difficulties1):
    start, end = -d[1] / 2, d[1] / 2
    source1.append({"question": idx + 1, "type": "neutral", "value": d[1], "start": start, "end": end})
    source1.append({"question": idx + 1, "type": "negativo", "value": d[0], "start": start, "end": start - d[0]})
    source1.append({"question": idx + 1, "type": "positivo", "value": d[2], "start": end, "end": end + d[2]})


source1 = pd.DataFrame(source1)


d1 = alt.Chart(source1).mark_bar().encode(
    x=alt.X('start:Q', title=""),
    x2='end:Q',
    y=alt.Y('question:N', axis=y_axis),
    color=alt.Color(
        'type:N',
        legend=alt.Legend(title='Sentimiento:'),
        scale=color_scale,
    )
)

# usefulness
utilities1 = []
cleanedU1 = list(map(clean_string, person1_utilities))
for idx, answer in enumerate(cleanedU1):
    encoded_text1 = tokenizer(answer, return_tensors='pt')
    output1 = model(**encoded_text1)
    scores1 = output1[0][0].detach().numpy()
    scores1 = softmax(scores1)
    utilities1.append(scores1)

source2 = []

for idx, d in enumerate(utilities1):
    start, end = -d[1] / 2, d[1] / 2
    source2.append({"question": idx + 1, "type": "neutral", "value": d[1], "start": start, "end": end})
    source2.append({"question": idx + 1, "type": "negativo", "value": d[0], "start": start, "end": start - d[0]})
    source2.append({"question": idx + 1, "type": "positivo", "value": d[2], "start": end, "end": end + d[2]})


source2 = pd.DataFrame(source2)


u1 = alt.Chart(source2).mark_bar().encode(
    x=alt.X('start:Q', title=""),
    x2='end:Q',
    y=alt.Y('question:N', axis=y_axis),
    color=alt.Color(
        'type:N',
        legend=alt.Legend(title='Sentimiento:'),
        scale=color_scale,
    )
)
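# The same question/neutral/negativo/positivo rows are assembled four times in
# this file for the diverging stacked bars. A single builder (sketch only,
# unused below) would capture the layout: the neutral band is centred on zero,
# negative mass extends to its left and positive mass to its right.
def build_diverging_source(score_rows):
    rows = []
    for idx, d in enumerate(score_rows):
        start, end = -d[1] / 2, d[1] / 2
        rows.append({"question": idx + 1, "type": "neutral", "value": d[1], "start": start, "end": end})
        rows.append({"question": idx + 1, "type": "negativo", "value": d[0], "start": start, "end": start - d[0]})
        rows.append({"question": idx + 1, "type": "positivo", "value": d[2], "start": end, "end": end + d[2]})
    return pd.DataFrame(rows)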

# emotion
emotion_analyzer = create_analyzer(task="emotion", lang="es")
emotions = emotion_analyzer.predict(person1_all_text)

emotions_data = []

for emotion in emotions:
    emotion = emotion.probas
    emotions_data.append([emotion["joy"], emotion["sadness"], emotion["anger"], emotion["surprise"], emotion["disgust"], emotion["fear"], emotion["others"]])

chart_data = pd.DataFrame(
    emotions_data,
    columns=["1-alegria", "2-tristeza", "3-enfado", "4-sorpresa", "5-disgusto", "6-miedo", "7-otros"]
)

data1 = pd.melt(chart_data.reset_index(), id_vars=["index"])

chart = (
    alt.Chart(data1)
    .mark_bar()
    .encode(
        x=alt.X("value", type="quantitative", title=""),
        y=alt.Y("index", type="nominal", title="", axis=y_axis),
        color=alt.Color("variable", type="nominal", title="", legend=alt.Legend(title='Emociones:')),
        order=alt.Order("variable", sort="ascending"),
    )
)

# autonomy: similarity of each full weekly answer to the autonomy statements
person1_all_text.append(autonomia)
embeddings = modelS.encode(person1_all_text)
aut = []
for idx, answer in enumerate(person1_all_text[:-1]):
    aut.append(cosine_similarity([embeddings[-1]], [embeddings[idx]]))
aut_similarities1 = []
for answer in aut:
    aut_similarities1.append(float(answer[0][0]))

index = [*range(0, len(aut_similarities1), 1)]

chart_autonomia1 = pd.DataFrame({
    'x': index,
    'y': aut_similarities1
})

a1 = alt.Chart(chart_autonomia1).mark_area().encode(
    x=alt.X('x', title="Semanas"),
    y=alt.Y('y', title=""),
    color=alt.value("#660033")
)

person1_all_text.pop()

# participation
person1_all_text.append(participacion)
cleaned1 = list(map(clean_string, person1_all_text))
embeddings = modelS.encode(cleaned1)
par = []
for idx, answer in enumerate(cleaned1[:-1]):
    par.append(cosine_similarity([embeddings[-1]], [embeddings[idx]]))
par_similarities1 = []
for answer in par:
    par_similarities1.append(float(answer[0][0]))

chart_participacion1 = pd.DataFrame({
    'x': index,
    'y': par_similarities1
})

p1 = alt.Chart(chart_participacion1).mark_area().encode(
    x=alt.X('x', title="Semanas"),
    y=alt.Y('y', title=""),
    color=alt.value("#33cc33")
)

person1_all_text.pop()

# commitment
person1_all_text.append(compromiso)
cleaned1 = list(map(clean_string, person1_all_text))
embeddings = modelS.encode(cleaned1)
com = []
for idx, answer in enumerate(cleaned1[:-1]):
    com.append(cosine_similarity([embeddings[-1]], [embeddings[idx]]))
com_similarities = []
for answer in com:
    com_similarities.append(float(answer[0][0]))

chart_compromiso1 = pd.DataFrame({
    'x': index,
    'y': com_similarities
})

c1 = alt.Chart(chart_compromiso1).mark_area().encode(
    x=alt.X('x', title="Semanas"),
    y=alt.Y('y', title=""),
    color=alt.value("#ff6600")
)

person1_all_text.pop()
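# autonomy, participation and commitment all follow one pattern: append a
# reference text, embed everything, score each answer against the reference,
# then pop the reference off again. A sketch of a shared helper (unused below):
def similarity_to_reference(texts, reference):
    """Cosine similarity of each text to the reference, via sentence embeddings."""
    embeddings = modelS.encode(texts + [reference])
    return [float(cosine_similarity([embeddings[-1]], [embeddings[i]])[0][0])
            for i in range(len(texts))]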

# PERSON 2
# objectives
person2_objetives.append(objetivos2)
cleaned2 = list(map(clean_string, person2_objetives))
embeddings = modelS.encode(cleaned2)
aut = []
for idx, answer in enumerate(cleaned2[:-1]):
    aut.append(cosine_similarity([embeddings[-1]], [embeddings[idx]]))
similarities2 = []
for answer in aut:
    similarities2.append(float(answer[0][0]))

index = [*range(0, len(similarities2), 1)]

chart_objetives2 = pd.DataFrame({
    'x': index,
    'y': similarities2
})

o2 = alt.Chart(chart_objetives2).mark_area().encode(
    x=alt.X('x', title="Semanas"),
    y=alt.Y('y', title=""),
    color=alt.value("#3399ff")
)

# difficulties
difficulties2 = []
cleanedD2 = list(map(clean_string, person2_difficulties))
for idx, answer in enumerate(cleanedD2):
    encoded_text2 = tokenizer(answer, return_tensors='pt')
    output2 = model(**encoded_text2)
    scores2 = output2[0][0].detach().numpy()
    scores2 = softmax(scores2)
    difficulties2.append(scores2)

source3 = []

for idx, d in enumerate(difficulties2):
    start, end = -d[1] / 2, d[1] / 2
    source3.append({"question": idx + 1, "type": "neutral", "value": d[1], "start": start, "end": end})
    source3.append({"question": idx + 1, "type": "negativo", "value": d[0], "start": start, "end": start - d[0]})
    source3.append({"question": idx + 1, "type": "positivo", "value": d[2], "start": end, "end": end + d[2]})


source3 = pd.DataFrame(source3)


d2 = alt.Chart(source3).mark_bar().encode(
    x=alt.X('start:Q', title=""),
    x2='end:Q',
    y=alt.Y('question:N', axis=y_axis),
    color=alt.Color(
        'type:N',
        legend=alt.Legend(title='Sentimiento:'),
        scale=color_scale,
    )
)

# usefulness
utilities2 = []
cleanedU2 = list(map(clean_string, person2_utilities))
for idx, answer in enumerate(cleanedU2):
    encoded_text2 = tokenizer(answer, return_tensors='pt')
    output2 = model(**encoded_text2)
    scores2 = output2[0][0].detach().numpy()
    scores2 = softmax(scores2)
    utilities2.append(scores2)

source4 = []

for idx, d in enumerate(utilities2):
    start, end = -d[1] / 2, d[1] / 2
    source4.append({"question": idx + 1, "type": "neutral", "value": d[1], "start": start, "end": end})
    source4.append({"question": idx + 1, "type": "negativo", "value": d[0], "start": start, "end": start - d[0]})
    source4.append({"question": idx + 1, "type": "positivo", "value": d[2], "start": end, "end": end + d[2]})


source4 = pd.DataFrame(source4)


u2 = alt.Chart(source4).mark_bar().encode(
    x=alt.X('start:Q', title=""),
    x2='end:Q',
    y=alt.Y('question:N', axis=y_axis),
    color=alt.Color(
        'type:N',
        legend=alt.Legend(title='Sentimiento:'),
        scale=color_scale,
    )
)

# emotion
emotions2 = emotion_analyzer.predict(person2_all_text)
emotions_data2 = []

for emotion in emotions2:
    emotion = emotion.probas
    emotions_data2.append([emotion["joy"], emotion["sadness"], emotion["anger"], emotion["surprise"], emotion["disgust"], emotion["fear"], emotion["others"]])

chart_data2 = pd.DataFrame(
    emotions_data2,
    columns=["1-alegria", "2-tristeza", "3-enfado", "4-sorpresa", "5-disgusto", "6-miedo", "7-otros"]
)


data2 = pd.melt(chart_data2.reset_index(), id_vars=["index"])

chart2 = (
    alt.Chart(data2)
    .mark_bar()
    .encode(
        x=alt.X("value", type="quantitative", title=""),
        y=alt.Y("index", type="nominal", title="", axis=y_axis),
        color=alt.Color("variable", type="nominal", title="", legend=alt.Legend(title='Emociones:')),
        order=alt.Order("variable", sort="ascending"),
    )
)

# autonomy
person2_all_text.append(autonomia)
embeddings2 = modelS.encode(person2_all_text)
aut2 = []
for idx, answer in enumerate(person2_all_text[:-1]):
    aut2.append(cosine_similarity([embeddings2[-1]], [embeddings2[idx]]))
aut_similarities2 = []
for answer in aut2:
    aut_similarities2.append(float(answer[0][0]))

index = [*range(0, len(aut_similarities2), 1)]

chart_autonomia2 = pd.DataFrame({
    'x': index,
    'y': aut_similarities2
})

a2 = alt.Chart(chart_autonomia2).mark_area().encode(
    x=alt.X('x', title="Semanas"),
    y=alt.Y('y', title=""),
    color=alt.value("#660033")
)

person2_all_text.pop()

# participation
person2_all_text.append(participacion)
cleaned1 = list(map(clean_string, person2_all_text))
embeddings = modelS.encode(cleaned1)
par = []
for idx, answer in enumerate(cleaned1[:-1]):
    par.append(cosine_similarity([embeddings[-1]], [embeddings[idx]]))
par_similarities2 = []
for answer in par:
    par_similarities2.append(float(answer[0][0]))

chart_participacion2 = pd.DataFrame({
    'x': index,
    'y': par_similarities2
})

p2 = alt.Chart(chart_participacion2).mark_area().encode(
    x=alt.X('x', title="Semanas"),
    y=alt.Y('y', title=""),
    color=alt.value("#33cc33")
)

person2_all_text.pop()

# commitment
person2_all_text.append(compromiso)
cleaned1 = list(map(clean_string, person2_all_text))
embeddings = modelS.encode(cleaned1)
com = []
for idx, answer in enumerate(cleaned1[:-1]):
    com.append(cosine_similarity([embeddings[-1]], [embeddings[idx]]))
com_similarities2 = []
for answer in com:
    com_similarities2.append(float(answer[0][0]))

chart_compromiso2 = pd.DataFrame({
    'x': index,
    'y': com_similarities2
})

c2 = alt.Chart(chart_compromiso2).mark_area().encode(
    x=alt.X('x', title="Semanas"),
    y=alt.Y('y', title=""),
    color=alt.value("#ff6600")
)

person2_all_text.pop()

# CHARTS
st.header("Persona 1 (DAW)")
with st.container():
    col1, col2 = st.columns(2, gap="large")
    with col1:
        st.text("Analisis de objetivos:")
        st.altair_chart(o1, use_container_width=True)
    with col2:
        st.text("Word Cloud de objetivos:")
        st.image(person1)
with st.container():
    st.text("Sentimiento de dificultad:")
    st.altair_chart(d1, use_container_width=True)
with st.container():
    st.text("Sentimiento de utilidad:")
    st.altair_chart(u1, use_container_width=True)
with st.container():
    st.text("Analisis de emociones:")
    st.altair_chart(chart, use_container_width=True)
with st.container():
    st.text("Analisis de autonomia:")
    st.altair_chart(a1, use_container_width=True)
with st.container():
    st.text("Analisis de participacion:")
    st.altair_chart(p1, use_container_width=True)
with st.container():
    st.text("Analisis de compromiso:")
    st.altair_chart(c1, use_container_width=True)


st.header("Persona 2 (MARK)")
with st.container():
    col1, col2 = st.columns(2, gap="large")
    with col1:
        st.text("Analisis de objetivos:")
        st.altair_chart(o2, use_container_width=True)
    with col2:
        st.text("Word Cloud de objetivos:")
        st.image(person2)
with st.container():
    st.text("Sentimiento de dificultad:")
    st.altair_chart(d2, use_container_width=True)
with st.container():
    st.text("Sentimiento de utilidad:")
    st.altair_chart(u2, use_container_width=True)
with st.container():
    st.text("Analisis de emociones:")
    st.altair_chart(chart2, use_container_width=True)
with st.container():
    st.text("Analisis de autonomia:")
    st.altair_chart(a2, use_container_width=True)
with st.container():
    st.text("Analisis de participacion:")
    st.altair_chart(p2, use_container_width=True)
with st.container():
    st.text("Analisis de compromiso:")
    st.altair_chart(c2, use_container_width=True)
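# The two Streamlit blocks above are identical apart from the charts and image
# they show. One rendering helper could serve both people; this is a sketch
# under the same variable names, left uncalled so the layout above stays as-is:
def render_person(title, objectives_chart, wordcloud_image, sections):
    st.header(title)
    with st.container():
        col1, col2 = st.columns(2, gap="large")
        with col1:
            st.text("Analisis de objetivos:")
            st.altair_chart(objectives_chart, use_container_width=True)
        with col2:
            st.text("Word Cloud de objetivos:")
            st.image(wordcloud_image)
    for label, section_chart in sections:
        with st.container():
            st.text(label)
            st.altair_chart(section_chart, use_container_width=True)
# e.g. render_person("Persona 1 (DAW)", o1, person1,
#                    [("Sentimiento de dificultad:", d1), ("Sentimiento de utilidad:", u1),
#                     ("Analisis de emociones:", chart), ("Analisis de autonomia:", a1),
#                     ("Analisis de participacion:", p1), ("Analisis de compromiso:", c1)])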
autonomia.txt
ADDED
@@ -0,0 +1,4 @@
Me siento capaz de tomar decisiones por mi cuenta en esta clase.
Siento que puedo elegir mi propio enfoque para aprender y estudiar.
Disfruto de tener la libertad de explorar mis propios intereses y preguntas en esta clase.
Me siento empoderado para buscar recursos adicionales y aprender más allá de lo que se enseña en la clase.
compromiso.txt
ADDED
@@ -0,0 +1,4 @@
Me siento comprometido con mi aprendizaje y estoy dispuesto a hacer el esfuerzo necesario para tener éxito en esta clase.
Estoy motivado para hacer lo mejor que pueda en las tareas y trabajos de esta clase.
Siento que lo que aprendo en esta clase es relevante y útil para mi futuro.
Me siento responsable de mi propio aprendizaje y estoy comprometido a mantenerme al día con las tareas y el trabajo de la clase.
data.csv
ADDED
The diff for this file is too large to render.
See raw diff
daw_obj.txt
ADDED
@@ -0,0 +1,18 @@
Un grado superior de desarrollo de aplicaciones web tiene como objetivo formar a profesionales capacitados para diseñar, desarrollar, implementar y mantener aplicaciones web, tanto en el ámbito empresarial como en el de los servicios y el comercio electrónico. Algunos de los objetivos específicos del grado superior en desarrollo de aplicaciones web pueden incluir:

Adquirir conocimientos técnicos en programación web y aplicaciones multimedia.
Desarrollar habilidades en la gestión de proyectos y el trabajo en equipo.
Conocer las principales tecnologías y herramientas utilizadas en el desarrollo web.
Adquirir conocimientos en bases de datos y sistemas de gestión de contenidos.
Desarrollar habilidades en el diseño y la arquitectura de aplicaciones web.
Adquirir conocimientos en seguridad informática y protección de datos personales.
Desarrollar habilidades en la resolución de problemas y la toma de decisiones.
En cuanto a las competencias que se esperan de los graduados en desarrollo de aplicaciones web, éstas pueden incluir:

Conocimientos técnicos en programación, diseño y arquitectura web.
Habilidad para trabajar en equipo y gestionar proyectos de desarrollo web.
Capacidad para adaptarse a nuevas tecnologías y herramientas de desarrollo web.
Habilidad para diseñar y desarrollar aplicaciones web eficientes y seguras.
Capacidad para analizar y resolver problemas en el desarrollo de aplicaciones web.
Habilidad para trabajar en entornos dinámicos y cambiantes.
Capacidad para comunicar y presentar de forma efectiva el trabajo desarrollado.
mark_obj.txt
ADDED
@@ -0,0 +1,14 @@
Los objetivos de un grado superior de marketing pueden variar de una institución educativa a otra, pero en general se espera que los estudiantes adquieran conocimientos y habilidades en áreas como:

Fundamentos del marketing: los estudiantes deben aprender los conceptos básicos del marketing, incluyendo el análisis del mercado, la segmentación de mercado, el posicionamiento de productos, la mezcla de marketing, entre otros.
Comunicación de marketing: los estudiantes deben aprender a crear estrategias de comunicación efectivas, incluyendo publicidad, relaciones públicas, marketing directo y promociones de ventas.
Marketing digital: los estudiantes deben aprender a utilizar herramientas digitales para el marketing, incluyendo redes sociales, correo electrónico, SEO, SEM y análisis web.
Investigación de mercado: los estudiantes deben aprender a realizar investigaciones de mercado para conocer las necesidades y preferencias de los consumidores y para identificar oportunidades de mercado.
Marketing internacional: los estudiantes deben aprender a adaptar las estrategias de marketing a diferentes culturas y mercados internacionales.

En cuanto a las competencias, se espera que los estudiantes desarrollen habilidades como:
Análisis y toma de decisiones: los estudiantes deben aprender a analizar datos de mercado y a tomar decisiones basadas en ellos.
Creatividad: los estudiantes deben aprender a ser creativos en la creación de estrategias de marketing y en la resolución de problemas.
Trabajo en equipo: los estudiantes deben aprender a colaborar con otros miembros del equipo de marketing para lograr objetivos comunes.
Comunicación: los estudiantes deben aprender a comunicarse de manera efectiva con otros miembros del equipo y con los clientes.
Actualización constante: los estudiantes deben aprender a mantenerse actualizados sobre las últimas tendencias y herramientas en el marketing, especialmente en el entorno digital en constante cambio.
participacion.txt
ADDED
@@ -0,0 +1,4 @@
Me gusta contribuir en las discusiones en clase y compartir mis ideas con los demás.
Siento que mis preguntas y comentarios son valorados por mi profesor y compañeros de clase.
Me siento cómodo compartiendo mis experiencias y perspectivas en los trabajos en grupo.
Me gusta participar en las actividades extracurriculares relacionadas con la clase para profundizar en mi aprendizaje.
person1.png
ADDED
person2.png
ADDED
requirements.txt
ADDED
@@ -0,0 +1,14 @@
gunicorn==19.9.0
Flask==2.2.3
numpy==1.20.3
pandas==1.2.4
scikit-learn==0.24.1
streamlit==1.22.0
protobuf==3.20.2
nltk==3.8.1
transformers==4.28.1
sentence-transformers==2.2.2
stylecloud==0.5.2
stop-words==2018.7.23
pysentimiento==0.6.7
huggingface_hub==0.14.1