rhemzypm committed on
Commit
cb3a755
1 Parent(s): b2ebe7b

Upload app.py

Files changed (1)
  1. app.py +75 -0
app.py ADDED
@@ -0,0 +1,75 @@
+ import gradio as gr
+ import torch
+ import joblib
+ import numpy as np
+ import pandas as pd
+ from transformers import AutoTokenizer, AutoModel
+
+ # Load IndoBERT tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("indolem/indobert-base-uncased")
+
+ # Load IndoBERT model
+ model = AutoModel.from_pretrained("indolem/indobert-base-uncased")
+
+ # Mapping dictionaries for labels
+ priority_score_mapping = {1: "LOW", 2: "MEDIUM", 3: "HIGH"}
+ problem_domain_mapping = {0: "OPERATIONAL", 1: "TECHNICAL"}
+
+ # Load the trained Random Forest models
+ best_classifier1 = joblib.load('best_classifier1_optimized.pkl')
+ best_classifier2 = joblib.load('best_classifier2_optimized.pkl')
+
+ markdown_text = '''
+ ## Label Description
+ ### Priority Score
+ * **Low** label means that a temporary/corrective solution can resolve the problem; a permanent solution can follow later because the business impact is still manageable.
+ * **Medium** label means that a time constraint for solving the problem needs to be set; if it stays open too long, it will affect the business.
+ * **High** label means that the problem is urgent and must be solved immediately.
+ ### Problem Domain
+ * **Operational** label means that the problem concerns the business or daily operations.
+ * **Technical** label means that the problem concerns the technical (technology) side, such as the mobile/web application.
+ '''
+
+ description = "Write feedback about a capsule hotel you have visited or stayed at. The machine learning model will predict the priority score and problem domain of the feedback."
+
+ # Function to perform predictions
+ def predict(text):
+     # Convert the sentence into input features
+     encoded_inputs = tokenizer(text, padding=True, truncation=True, return_tensors="pt", max_length=128)
+
+     # Compute word embeddings with the IndoBERT model
+     with torch.no_grad():
+         outputs = model(**encoded_inputs)
+         embeddings = outputs.last_hidden_state
+
+     # Convert the embeddings to a numpy array
+     embeddings = embeddings.numpy()
+
+     # Flatten the (batch, seq_len, 768) embeddings to (batch, seq_len * 768)
+     embeddings_custom_flat = embeddings.reshape(embeddings.shape[0], -1)
+
+     # Ensure the flattened embeddings have exactly 768 features
+     num_features_expected = 768
+     if embeddings_custom_flat.shape[1] < num_features_expected:
+         # If the number of features is less than 768, pad the embeddings
+         pad_width = num_features_expected - embeddings_custom_flat.shape[1]
+         embeddings_custom_flat = np.pad(embeddings_custom_flat, ((0, 0), (0, pad_width)), mode='constant')
+
+     elif embeddings_custom_flat.shape[1] > num_features_expected:
+         # If the number of features is more than 768, truncate the embeddings
+         embeddings_custom_flat = embeddings_custom_flat[:, :num_features_expected]
+
+     # Predict the priority_score for the custom input
+     custom_priority_score = best_classifier1.predict(embeddings_custom_flat)
+
+     # Predict the problem_domain for the custom input
+     custom_problem_domain = best_classifier2.predict(embeddings_custom_flat)
+
+     # Map numerical labels to human-readable labels
+     mapped_priority_score = priority_score_mapping.get(custom_priority_score[0], "unknown")
+     mapped_problem_domain = problem_domain_mapping.get(custom_problem_domain[0], "unknown")
+
+     return f"Predicted Priority Score: {mapped_priority_score}, Predicted Problem Domain: {mapped_problem_domain}"
+
+
+ # Create a Gradio interface
+ gr.Interface(fn=predict, inputs="text", outputs="text", title="Simple Risk Classifier Demo (Case Study: Capsule Hotel)", description=description, article=markdown_text).launch(debug=True)
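
For readers of this diff, a minimal standalone sketch (not part of the committed app.py) of what the 768-feature adjustment in predict() amounts to for a single input: flattening last_hidden_state and truncating it to the first 768 columns keeps exactly the first token's ([CLS]) vector, and that is what the Random Forest classifiers receive. The array names and sizes below are illustrative only.

import numpy as np

# Illustrative shapes: one sentence of 5 tokens, hidden size 768
seq_len, hidden = 5, 768
embeddings = np.random.rand(1, seq_len, hidden).astype(np.float32)

# Same flattening as in predict(): (1, seq_len, 768) -> (1, seq_len * 768)
flat = embeddings.reshape(embeddings.shape[0], -1)

# Truncating to 768 columns keeps the first token's ([CLS]) hidden state
flat = flat[:, :hidden]
assert np.allclose(flat, embeddings[:, 0, :])
print(flat.shape)  # (1, 768)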