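"""Prediction utilities: load the pre-trained SVC classifiers and TF-IDF
vectorisers from the models/ directory and expose helpers for single-text
and batch (CSV) predictions of topic, sentiment and touchpoint."""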
import re
import pickle
import numpy as np
import pandas as pd

def _load_pickle(path):
    # Load a pickled object, making sure the file handle is closed
    with open(path, 'rb') as f:
        return pickle.load(f)

svc = _load_pickle('models/svc_model.sav')
tfidf = _load_pickle('models/tfidf.sav')
svc_sentiment = _load_pickle('models/sentiment_model.sav')
tfidf_sentiment = _load_pickle('models/tfidf_sentiment.sav')
svc_touchpoint = _load_pickle('models/touchpoint_model.sav')
tfidf_touchpoint = _load_pickle('models/tfidf_touchpoint.sav')

# Topic labels, in the column order produced by the multi-label topic model
labels = [
    'Product quality', 'Knowledge', 'Appointment', 'Service etiquette',
    'Waiting time', 'Repair speed', 'Repair cost', 'Repair quality',
    'Warranty', 'Product replacement', 'Loan sets',
]

# Sample CSV re-encoded as UTF-8 bytes, ready to be offered as a download template
sample_file = pd.read_csv('sample.csv').to_csv(index=False).encode('utf-8')

print('utils imported!')

def get_single_prediction(text):
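    """Predict topic, sentiment and touchpoint probabilities for one piece of feedback.

    Returns a tuple of three DataFrames (topic, sentiment, touchpoint), each sorted
    by probability in ascending order.
    """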
    
    # Normalise the input into the form the vectorisers were trained on
    text = text.lower().strip()  # lower-case and strip surrounding whitespace
    
    # Make topic predictions
    text_vectors = tfidf.transform([text])
    results = svc.predict_proba(text_vectors).squeeze().round(2)
    pred_prob = pd.DataFrame({'topic': labels, 'probability': results}).sort_values('probability', ascending=True)

    # Make sentiment predictions
    text_vectors_sentiment = tfidf_sentiment.transform([text])
    results_sentiment = svc_sentiment.predict_proba(text_vectors_sentiment).squeeze().round(2)
    pred_prob_sentiment = pd.DataFrame({'sentiment': ['Negative', 'Positive'], 'probability': results_sentiment}).sort_values('probability', ascending=True)
    
    # Make touchpoint predictions
    text_vectors_touchpoint = tfidf_touchpoint.transform([text])
    results_touchpoint = svc_touchpoint.predict_proba(text_vectors_touchpoint).squeeze().round(2)
    pred_prob_touchpoint = pd.DataFrame({'touchpoint': ['ASC', 'CC', 'No touchpoint', 'Technician'], 'probability': results_touchpoint}).sort_values('probability', ascending=True)
    
    return (pred_prob, pred_prob_sentiment, pred_prob_touchpoint)

def get_multiple_predictions(csv):
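    """Predict topics, sentiment and touchpoint for every row of a one-column CSV of feedback.

    Returns the predictions as UTF-8 encoded CSV bytes; rows that are missing or blank
    are passed through unlabelled.
    """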
    
    # Expect a single-column CSV of free-text feedback
    df = pd.read_csv(csv, encoding='latin-1')
    df.columns = ['sequence']

    df['sequence_clean'] = df['sequence'].str.lower().str.strip()

    # Set aside rows that are missing or blank so they can be re-appended, unlabelled, at the end
    invalid = df[(pd.isna(df['sequence_clean'])) | (df['sequence_clean'] == '')].copy()
    invalid.drop(columns=['sequence_clean'], inplace=True)
    
    # Drop missing and blank rows from the working frame before prediction
    df.dropna(inplace=True)
    df = df[df['sequence_clean'] != ''].reset_index(drop=True)
    
    # Vectorise text and get topic predictions
    text_vectors = tfidf.transform(df['sequence_clean'])
    pred_results = pd.DataFrame(svc.predict(text_vectors), columns=labels)

    # Flag rows where no topic at all was predicted as 'others'
    pred_results['others'] = (pred_results[labels].max(axis=1) == 0).astype(int)

    # Vectorise text and get sentiment predictions
    text_vectors_sentiment = tfidf_sentiment.transform(df['sequence_clean'])
    pred_results_sentiment = pd.DataFrame(svc_sentiment.predict(text_vectors_sentiment), columns=['sentiment'])

    # Vectorise text and get touchpoint predictions
    text_vectors_touchpoint = tfidf_touchpoint.transform(df['sequence_clean'])
    pred_results_touchpoint = pd.DataFrame(svc_touchpoint.predict(text_vectors_touchpoint), columns=['touchpoint'])
    
    # Join predictions back to the original text; all frames share the same 0..n-1 index
    final_results = df.join(pred_results).join(pred_results_sentiment).join(pred_results_touchpoint)
    final_results.drop(columns=['sequence_clean'], inplace=True)
    
    # Re-append the rows set aside as invalid (they come back unlabelled)
    # and return the whole result as UTF-8 encoded CSV bytes
    if len(invalid) == 0:
        return final_results.to_csv(index=False).encode('utf-8')
    else:
        return pd.concat([final_results, invalid]).reset_index(drop=True).to_csv(index=False).encode('utf-8')
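

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): how these helpers might be called from a
# quick local test. The feedback string, the reuse of 'sample.csv' as batch
# input and the output path are assumptions, not part of the original module.
if __name__ == '__main__':
    topics, sentiment, touchpoint = get_single_prediction(
        'The technician was friendly but the repair took too long'
    )
    print(topics.tail(3))       # three most probable topics (frames are sorted ascending)
    print(sentiment.tail(1))    # most probable sentiment
    print(touchpoint.tail(1))   # most probable touchpoint

    with open('sample.csv', 'rb') as f:
        csv_bytes = get_multiple_predictions(f)
    with open('predictions_out.csv', 'wb') as out:
        out.write(csv_bytes)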