davidlee1102 commited on
Commit
5f275da
1 Parent(s): 3b096e3

Add User_Logs For Group Coursework

Browse files
__pycache__/constance_data.cpython-39.pyc ADDED
Binary file (858 Bytes). View file
 
__pycache__/pre_processing_data.cpython-39.pyc ADDED
Binary file (2.89 kB). View file
 
app.py CHANGED
@@ -1,12 +1,17 @@
1
  import streamlit as st
2
 
3
  from emotion_model import emotion_predict
 
4
 
5
  name = st.text_input("Enter your sentence here")
6
  if (st.button('Submit')):
7
  result = name.title()
8
  try:
9
  result_check = emotion_predict(result)
 
 
 
 
10
  except Exception as E:
11
  result_check = "Error"
12
  print(E)
 
1
"""Streamlit front end: read a sentence, predict its emotion, and log the interaction."""
import streamlit as st

from emotion_model import emotion_predict
from pre_processing_data import user_capture

name = st.text_input("Enter your sentence here")
if st.button('Submit'):
    result = name.title()
    try:
        result_check = emotion_predict(result)
        # Best-effort audit logging: a failure to write the log file
        # must never break the prediction path, so it gets its own handler.
        try:
            user_capture(result, result_check)
        except Exception as E:
            print(E)
    except Exception as E:
        # Any model failure degrades to a sentinel value rather than crashing the app.
        result_check = "Error"
        print(E)
    # NOTE(review): result_check is computed but never rendered (no st.write /
    # st.success call visible here) — confirm whether display happens elsewhere.
pre_processing_data.py CHANGED
@@ -3,7 +3,9 @@ import spacy
3
  import nltk
4
  import pickle
5
  import subprocess
 
6
 
 
7
  from nltk.corpus import stopwords
8
  from nltk.tokenize import RegexpTokenizer
9
  from keras_preprocessing.sequence import pad_sequences
@@ -82,3 +84,15 @@ def preprocessing_data(string_text):
82
  string_output = [w for w in string_output if not w in stop_words]
83
  string_output = " ".join(string_output)
84
  return string_output
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  import nltk
4
  import pickle
5
  import subprocess
6
+ import pandas as pd
7
 
8
+ from datetime import datetime
9
  from nltk.corpus import stopwords
10
  from nltk.tokenize import RegexpTokenizer
11
  from keras_preprocessing.sequence import pad_sequences
 
84
  string_output = [w for w in string_output if not w in stop_words]
85
  string_output = " ".join(string_output)
86
  return string_output
87
+
88
+
89
def user_capture(user_input, emotion_predict):
    """Append one prediction record to the ``user_logs.csv`` audit file.

    Parameters
    ----------
    user_input : str
        The sentence the user submitted (title-cased by the caller).
    emotion_predict : str
        The label the emotion model returned for that sentence.

    Each record carries a ``time_logs`` timestamp from ``datetime.now()``
    (naive local time — matches the existing rows in the log).  If the log
    file does not exist yet, it is created with the canonical columns
    instead of raising ``FileNotFoundError`` on first use.
    """
    try:
        dataframe_capture = pd.read_csv('user_logs.csv')
    except FileNotFoundError:
        # First run: start an empty log with the expected header so the
        # app no longer depends on a hand-seeded CSV being checked in.
        dataframe_capture = pd.DataFrame(
            columns=["user_input", "emotion_predict", "time_logs"]
        )

    user_input_logs = pd.DataFrame({
        "user_input": [user_input],
        "emotion_predict": [emotion_predict],
        "time_logs": [datetime.now()],
    })

    dataframe_capture = pd.concat([dataframe_capture, user_input_logs], ignore_index=True)
    dataframe_capture.to_csv("user_logs.csv", index=False)
user_logs.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ user_input,emotion_predict,time_logs
2
+ now you can see me as a manager,"""approval""",2023-05-04 18:57:29.276523
3
+ "I love you so much, babe","""love""",2023-05-04 18:57:46.411772
4
+ "I miss you so much, babe","""sadness""",2023-05-04 18:57:53.579631
5
+ "I likes your dress, babe","""neutral""",2023-05-04 18:58:15.954185
6
+ "I like your dress, babe","""neutral""",2023-05-04 18:58:26.430269
7
+ I dont think this is a good idea,"""gratitude""",2023-05-04 18:58:47.154434