"""Train and compile the model."""

import pickle
import shutil

import numpy
import pandas

from settings import (
    DEPLOYMENT_PATH,
    DATA_PATH, 
    INPUT_SLICES, 
    PRE_PROCESSOR_APPLICANT_PATH, 
    PRE_PROCESSOR_BANK_PATH,
    PRE_PROCESSOR_CREDIT_BUREAU_PATH,
    APPLICANT_COLUMNS,
    BANK_COLUMNS,
    CREDIT_BUREAU_COLUMNS,
)
from utils.client_server_interface import MultiInputsFHEModelDev
from utils.model import MultiInputDecisionTreeClassifier
from utils.pre_processing import get_pre_processors


def get_multi_inputs(data):
    """Get inputs for all three parties from the input data, using fixed slices.
    
    Args:
        data (numpy.ndarray): The input data to consider.
    
    Returns:
        (Tuple[numpy.ndarray]): The inputs for all three parties.
    """
    return (
        data[:, INPUT_SLICES["applicant"]], 
        data[:, INPUT_SLICES["bank"]], 
        data[:, INPUT_SLICES["credit_bureau"]]
    )


print("Load and pre-process the data")

# Load the data
data = pandas.read_csv(DATA_PATH, encoding="utf-8")

# Define input and target data
data_x = data.copy()
data_y = data_x.pop("Target").copy().to_frame()

# Get data from all parties
data_applicant = data_x[APPLICANT_COLUMNS].copy()
data_bank = data_x[BANK_COLUMNS].copy()
data_credit_bureau = data_x[CREDIT_BUREAU_COLUMNS].copy()

# Feature engineer the data
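# get_pre_processors is assumed to return one transformer per party (for
# example, scikit-learn pipelines), each fitted on that party's columns only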
pre_processor_applicant, pre_processor_bank, pre_processor_credit_bureau = get_pre_processors()

preprocessed_data_applicant = pre_processor_applicant.fit_transform(data_applicant)
preprocessed_data_bank = pre_processor_bank.fit_transform(data_bank)
preprocessed_data_credit_bureau = pre_processor_credit_bureau.fit_transform(data_credit_bureau)

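# Concatenate in the same party order as INPUT_SLICES so that get_multi_inputs
# can split the pre-processed matrix back into per-party inputs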
preprocessed_data_x = numpy.concatenate(
    (preprocessed_data_applicant, preprocessed_data_bank, preprocessed_data_credit_bureau),
    axis=1,
)


print("\nTrain and compile the model")

model = MultiInputDecisionTreeClassifier()

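# fit_benchmark is expected to train the FHE-ready model together with an
# equivalent scikit-learn model; the latter is kept for comparison only and
# is not used further in this script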
model, sklearn_model = model.fit_benchmark(preprocessed_data_x, data_y)
 
multi_inputs_train = get_multi_inputs(preprocessed_data_x)

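# Compile the model using the training inputs as the calibration set; marking
# all three inputs as encrypted means no party's data is exposed in the clear
# during FHE inference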
model.compile(
    *multi_inputs_train,
    inputs_encryption_status=["encrypted", "encrypted", "encrypted"],
)

print("\nSave deployment files")

# Delete the deployment folder and its content if it already exists
if DEPLOYMENT_PATH.is_dir():
    shutil.rmtree(DEPLOYMENT_PATH)

# Save files needed for deployment (and enable cross-platform deployment)
fhe_model_dev = MultiInputsFHEModelDev(DEPLOYMENT_PATH, model)
fhe_model_dev.save(via_mlir=True)

# Save pre-processors
with (
    PRE_PROCESSOR_APPLICANT_PATH.open("wb") as file_applicant,
    PRE_PROCESSOR_BANK_PATH.open("wb") as file_bank,
    PRE_PROCESSOR_CREDIT_BUREAU_PATH.open("wb") as file_credit_bureau,
):
    pickle.dump(pre_processor_applicant, file_applicant)
    pickle.dump(pre_processor_bank, file_bank)
    pickle.dump(pre_processor_credit_bureau, file_credit_bureau)
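
# At inference time, each pre-processor is expected to be loaded back the same
# way, for example:
#   with PRE_PROCESSOR_APPLICANT_PATH.open("rb") as file_applicant:
#       pre_processor_applicant = pickle.load(file_applicant)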

print("\nDone !")