hongaik committed
Commit b2f1c3e
1 Parent(s): c8aa769
Files changed (2)
  1. .ipynb_checkpoints/utils-checkpoint.py +78 -0
  2. utils.py +4 -3
.ipynb_checkpoints/utils-checkpoint.py ADDED
@@ -0,0 +1,78 @@
+import re
+import pickle
+import numpy as np
+import pandas as pd
+
+
+tfidf = pickle.load(open('models/tfidf.sav', 'rb'))
+svc_sentiment = pickle.load(open('models/sentiment_model.sav', 'rb'))
+tfidf_sentiment = pickle.load(open('models/tfidf_sentiment.sav', 'rb'))
+svc = pickle.load(open('models/svc_model.sav', 'rb'))
+
+labels = [
+    'Product quality', 'Knowledge',
+    'Appointment', 'Service etiquette', 'Waiting time',
+    'Repair speed', 'Repair cost', 'Repair quality', 'Warranty',
+    'Product replacement', 'Loan sets']
+
+sample_file = pd.read_csv('sample.csv').to_csv(index=False).encode('utf-8')
+
+print('utils imported!')
+
+def get_single_prediction(text):
+
+    # manipulate data into a format that we pass to our model
+    text = text.lower().strip() # lower case
+
+    # Vectorise text and store in new dataframe. Sentence vector = average of word vectors
+    text_vectors = tfidf.transform(list(text))
+    print('check1')
+    # Make topic predictions
+    results = svc.predict_proba(text_vectors).squeeze().round(2)
+    print('check2')
+    pred_prob = pd.DataFrame({'topic': labels, 'probability': results}).sort_values('probability', ascending=True)
+    print('check3')
+    # Make sentiment predictions
+    text_vectors_sentiment = tfidf_sentiment.transform(list(text))
+    print('check4')
+    results_sentiment = svc_sentiment.predict_proba(text_vectors).squeeze().round(2)
+    pred_prob_sentiment = pd.DataFrame({'sentiment': ['Negative', 'Positive'], 'probability': results_sentiment}).sort_values('probability', ascending=True)
+
+
+    return (pred_prob, pred_prob_sentiment)
+
+def get_multiple_predictions(csv):
+
+    df = pd.read_csv(csv)
+    df.columns = ['sequence']
+
+    df['sequence_clean'] = df['sequence'].str.lower().str.strip()
+
+    # Remove rows with blank string
+    invalid = df[(pd.isna(df['sequence_clean'])) | (df['sequence_clean'] == '')]
+    invalid.drop(columns=['sequence_clean'], inplace=True)
+
+    # Drop rows with blank string
+    df.dropna(inplace=True)
+    df = df[df['sequence_clean'] != ''].reset_index(drop=True)
+
+    # Vectorise text and get topic predictions
+    text_vectors = tfidf.transform(df['sequence_clean'])
+    pred_results = pd.DataFrame(svc.predict(text_vectors), columns=labels)
+
+    # Vectorise text and get sentiment predictions
+    text_vectors_sentiment = tfidf_sentiment.transform(df['sequence_clean'])
+    pred_results_sentiment = pd.DataFrame(svc_sentiment.predict(text_vectors_sentiment), columns=['sentiment'])
+
+    # Join back to original sequence
+    final_results = df.join(pred_results).join(pred_results_sentiment)
+    final_results['others'] = final_results[labels].max(axis=1)
+    final_results['others'] = final_results['others'].apply(lambda x: 1 if x == 0 else 0)
+
+    final_results.drop(columns=['sequence_clean'], inplace=True)
+
+    # Append invalid rows
+    if len(invalid) == 0:
+        return final_results.to_csv(index=False).encode('utf-8')
+    else:
+        return pd.concat([final_results, invalid]).reset_index(drop=True).to_csv(index=False).encode('utf-8')
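
Two spots in get_single_prediction above are worth double-checking: tfidf.transform(list(text)) splits the string into single characters, so each character is vectorised as its own "document" (scikit-learn vectorisers expect an iterable of full documents), and the sentiment probabilities are computed from text_vectors rather than the freshly built text_vectors_sentiment. A minimal sketch of how the function could look with those two points addressed, assuming the pickled vectorisers and classifiers expose the usual scikit-learn transform/predict_proba interface:

```python
# Sketch only: relies on the module-level tfidf, svc, labels,
# tfidf_sentiment and svc_sentiment loaded at the top of utils.py.
import pandas as pd  # already imported in utils.py

def get_single_prediction(text):
    text = text.lower().strip()

    # Topic predictions: wrap the text in a one-element list so the whole
    # sentence is vectorised, instead of list(text), which yields characters.
    text_vectors = tfidf.transform([text])
    results = svc.predict_proba(text_vectors).squeeze().round(2)
    pred_prob = (pd.DataFrame({'topic': labels, 'probability': results})
                   .sort_values('probability', ascending=True))

    # Sentiment predictions: score on the sentiment vectoriser's output,
    # not the topic vectors.
    text_vectors_sentiment = tfidf_sentiment.transform([text])
    results_sentiment = svc_sentiment.predict_proba(text_vectors_sentiment).squeeze().round(2)
    pred_prob_sentiment = (pd.DataFrame({'sentiment': ['Negative', 'Positive'],
                                         'probability': results_sentiment})
                             .sort_values('probability', ascending=True))

    return pred_prob, pred_prob_sentiment
```
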
utils.py CHANGED
@@ -26,14 +26,15 @@ def get_single_prediction(text):
 
     # Vectorise text and store in new dataframe. Sentence vector = average of word vectors
     text_vectors = tfidf.transform(list(text))
-
+    print('check1')
     # Make topic predictions
     results = svc.predict_proba(text_vectors).squeeze().round(2)
+    print('check2')
     pred_prob = pd.DataFrame({'topic': labels, 'probability': results}).sort_values('probability', ascending=True)
-
+    print('check3')
     # Make sentiment predictions
     text_vectors_sentiment = tfidf_sentiment.transform(list(text))
-
+    print('check4')
     results_sentiment = svc_sentiment.predict_proba(text_vectors).squeeze().round(2)
     pred_prob_sentiment = pd.DataFrame({'sentiment': ['Negative', 'Positive'], 'probability': results_sentiment}).sort_values('probability', ascending=True)
 
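
The four print('check…') calls added here read like temporary tracing to see where the function stalls. If markers like these are meant to stay, one option is to route them through the standard logging module so they can be switched on or off per environment without editing the function again; a minimal sketch, with a hypothetical checkpoint() helper (names are illustrative):

```python
# Sketch only: DEBUG-level trace markers in place of the ad-hoc prints.
import logging

logger = logging.getLogger('utils')
logging.basicConfig(level=logging.DEBUG)  # the hosting app would normally configure this

def checkpoint(label):
    """Emit a trace marker equivalent to the print('check...') calls."""
    logger.debug(label)

checkpoint('check1')  # after tfidf.transform(...)
checkpoint('check2')  # after svc.predict_proba(...)
checkpoint('check3')  # after building pred_prob
checkpoint('check4')  # after tfidf_sentiment.transform(...)
```

Leaving the prints in is harmless for a demo; the logging variant mainly keeps stdout quiet once the tracing is no longer needed.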