pushpikaLiyanagama committed on
Commit
ecc3892
1 Parent(s): 7f0c1ee

Upload 8 files

app.py ADDED
@@ -0,0 +1,70 @@
+ # app.py
+
+ import joblib
+ import pandas as pd
+ import gradio as gr
+
+ # Load the scaler and models
+ scaler = joblib.load("models/scaler.joblib")
+ models = {
+     "processing": joblib.load("models/svm_model_processing.joblib"),
+     "perception": joblib.load("models/svm_model_perception.joblib"),
+     "input": joblib.load("models/svm_model_input.joblib"),
+     "understanding": joblib.load("models/svm_model_understanding.joblib")
+ }
+
+ def predict(course_overview, reading_file, abstract_materiale, concrete_material, visual_materials,
+             self_assessment, exercises_submit, quiz_submitted, playing, paused, unstarted, buffering):
+     try:
+         input_data = {
+             "course_overview": [course_overview],
+             "reading_file": [reading_file],
+             "abstract_materiale": [abstract_materiale],
+             "concrete_material": [concrete_material],
+             "visual_materials": [visual_materials],
+             "self_assessment": [self_assessment],
+             "exercises_submit": [exercises_submit],
+             "quiz_submitted": [quiz_submitted],
+             "playing": [playing],
+             "paused": [paused],
+             "unstarted": [unstarted],
+             "buffering": [buffering]
+         }
+
+         input_df = pd.DataFrame(input_data)
+         input_scaled = scaler.transform(input_df)
+
+         predictions = {}
+         for target, model in models.items():
+             pred = model.predict(input_scaled)
+             predictions[target] = int(pred[0])
+
+         return predictions
+
+     except Exception as e:
+         return {"error": str(e)}
+
+ # Define the Gradio interface
+ iface = gr.Interface(
+     fn=predict,
+     inputs=[
+         gr.Number(label="Course Overview"),
+         gr.Number(label="Reading File"),
+         gr.Number(label="Abstract Materiale"),
+         gr.Number(label="Concrete Material"),
+         gr.Number(label="Visual Materials"),
+         gr.Number(label="Self Assessment"),
+         gr.Number(label="Exercises Submit"),
+         gr.Number(label="Quiz Submitted"),
+         gr.Number(label="Playing"),
+         gr.Number(label="Paused"),
+         gr.Number(label="Unstarted"),
+         gr.Number(label="Buffering")
+     ],
+     outputs="json",
+     title="SVM Multi-Target Prediction",
+     description="Enter the feature values to get predictions for processing, perception, input, and understanding."
+ )
+
+ if __name__ == "__main__":
+     iface.launch()
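For a quick smoke test outside the Gradio UI, the predict function in app.py can also be called directly (assuming the LFS model files have been fetched so the joblib loads succeed). The snippet below is only a sketch; the file name and the twelve feature values are made-up placeholders, not real learner activity data:

# smoke_test.py -- illustrative only; feature values are arbitrary placeholders
from app import predict  # importing app.py also loads the scaler and the four SVM models

sample = {
    "course_overview": 3, "reading_file": 5, "abstract_materiale": 2,
    "concrete_material": 4, "visual_materials": 6, "self_assessment": 1,
    "exercises_submit": 2, "quiz_submitted": 1, "playing": 10,
    "paused": 4, "unstarted": 0, "buffering": 1,
}
print(predict(**sample))  # e.g. {"processing": 1, "perception": 0, "input": 1, "understanding": 0}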
inference.py ADDED
@@ -0,0 +1,63 @@
+ # inference.py
+
+ import joblib
+ import pandas as pd
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
+ from typing import List
+
+ app = FastAPI()
+
+ # Load the scaler and models at startup
+ scaler = joblib.load("models/scaler.joblib")
+ models = {
+     "processing": joblib.load("models/svm_model_processing.joblib"),
+     "perception": joblib.load("models/svm_model_perception.joblib"),
+     "input": joblib.load("models/svm_model_input.joblib"),
+     "understanding": joblib.load("models/svm_model_understanding.joblib")
+ }
+
+ # Define the input schema
+ class InputData(BaseModel):
+     course_overview: float
+     reading_file: float
+     abstract_materiale: float
+     concrete_material: float
+     visual_materials: float
+     self_assessment: float
+     exercises_submit: float
+     quiz_submitted: float
+     playing: float
+     paused: float
+     unstarted: float
+     buffering: float
+
+ class PredictionResponse(BaseModel):
+     processing: int
+     perception: int
+     input: int
+     understanding: int
+
+ @app.post("/predict", response_model=PredictionResponse)
+ def predict(data: InputData):
+     try:
+         # Convert input data to DataFrame
+         input_df = pd.DataFrame([data.dict()])
+
+         # If there are categorical variables that were one-hot encoded during training,
+         # ensure that the input data matches the training data's dummy variables.
+         # For simplicity, all inputs are assumed to be numerical and to match the training features.
+
+         # Scale the input
+         input_scaled = scaler.transform(input_df)
+
+         # Make predictions for each target
+         predictions = {}
+         for target, model in models.items():
+             pred = model.predict(input_scaled)
+             predictions[target] = int(pred[0])
+
+         return predictions
+
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
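With the FastAPI app served locally (for example via uvicorn on the default port 8000), the /predict route can be exercised with a request like the sketch below. The localhost URL, the third-party requests package (not listed in requirements.txt), and the payload values are assumptions for illustration:

# client_example.py -- minimal sketch of a call to the /predict endpoint
import requests  # assumption: installed separately; not part of requirements.txt

payload = {
    "course_overview": 3, "reading_file": 5, "abstract_materiale": 2,
    "concrete_material": 4, "visual_materials": 6, "self_assessment": 1,
    "exercises_submit": 2, "quiz_submitted": 1, "playing": 10,
    "paused": 4, "unstarted": 0, "buffering": 1,
}

resp = requests.post("http://localhost:8000/predict", json=payload)
resp.raise_for_status()
print(resp.json())  # PredictionResponse fields: processing, perception, input, understanding (all int)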
models/scaler.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d40f613f3b8b7bd9b51dc6b13631dd07ebdf6b373e41e6c5fd9d7cf20af814d
+ size 1431
models/svm_model_input.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:424a08119df19f4109d23e0d5f17084f0d3520c6a3c7eb2b137290dbf07e8d41
+ size 34539
models/svm_model_perception.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39e1f950e7427bde99c6de38bb977fb21217b36b1fa0eec4a4f68b2f58b0a99a
+ size 30139
models/svm_model_processing.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f6e8e1f74fb2d1f78144fd1cc820aa3af538459b07429d2e82abb9a3d2e5a2d
+ size 68651
models/svm_model_understanding.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1dac2d4345f1e12fb913694390fa16a77ab781ad209e8433db4dc98d3d132423
+ size 53451
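The scaler and the four SVM model files are committed as Git LFS pointer files, so the actual binaries must be fetched (for example with git lfs pull) before joblib.load can read them. A minimal sanity check, assuming the repository root as the working directory, might look like:

# check_models.py -- optional sanity check that the LFS objects were fetched, not just the pointer stubs;
# the file paths come from app.py / inference.py
import joblib

artifacts = [
    "models/scaler.joblib",
    "models/svm_model_processing.joblib",
    "models/svm_model_perception.joblib",
    "models/svm_model_input.joblib",
    "models/svm_model_understanding.joblib",
]

for path in artifacts:
    obj = joblib.load(path)  # raises if the file is still an LFS pointer stub
    print(path, "->", type(obj).__name__)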
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ pandas
+ joblib
+ fastapi
+ uvicorn
+ gradio
+ scikit-learn==1.5.2
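With the LFS objects fetched and the dependencies installed (for example via pip install -r requirements.txt), the repository offers two entry points: python app.py launches the Gradio demo, and uvicorn inference:app --host 0.0.0.0 --port 8000 serves the FastAPI endpoint. On a Hugging Face Space the platform typically handles the install and launch automatically.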