File size: 2,304 Bytes
25daa45
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ba3700b
 
25daa45
 
 
 
ba3700b
25daa45
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
from handler import EndpointHandler
import numpy as np
import shutil

from pathlib import Path

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

from concrete.ml.sklearn import LogisticRegression
from concrete.ml.deployment import FHEModelClient, FHEModelDev

# Train and compile a model locally. In the future, an existing model from an
# HF repository should be reused instead.
path_to_model = Path("compiled_model")
do_training_and_compilation = True

# Synthetic binary-classification dataset with an 80/20 train/test split.
x, y = make_classification(n_samples=1000, class_sep=2, n_features=30, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

if do_training_and_compilation:
    # Fit the clear-text model on the training split.
    model_dev = LogisticRegression()
    model_dev.fit(X_train, y_train)

    # Compile the fitted model into its FHE-executable equivalent.
    model_dev.compile(X_train)

    # Persist the compiled model, replacing any previous artifacts on disk.
    shutil.rmtree(path_to_model, ignore_errors=True)
    fhemodel_dev = FHEModelDev(path_to_model, model_dev)
    fhemodel_dev.save(via_mlir=True)

# Instantiate the endpoint handler (model compilation is done on the HF side).
my_handler = EndpointHandler(path=".")

# Client side: recover the model parameters, then generate the private key and
# the serialized evaluation keys the server needs to compute on ciphertexts.
fhemodel_client = FHEModelClient(path_to_model)
fhemodel_client.generate_private_and_evaluation_keys()
evaluation_keys = fhemodel_client.get_serialized_evaluation_keys()

# Test the handler: run an encrypted inference for every test sample and
# measure the clear-text accuracy of the decrypted predictions.
nb_good = 0
nb_samples = len(X_test)
verbose = False

for i in range(nb_samples):

    # Quantize the input and encrypt it (client side)
    encrypted_inputs = fhemodel_client.quantize_encrypt_serialize([X_test[i]])

    # Prepare the payload, including the evaluation keys which are needed server side
    payload = {
        "inputs": "fake",
        "encrypted_inputs": encrypted_inputs,
        "evaluation_keys": evaluation_keys,
    }

    # Run the inference on HF servers
    encrypted_prediction = my_handler(payload)

    # Decrypt the result, dequantize, and take the most probable class
    prediction_proba = fhemodel_client.deserialize_decrypt_dequantize(encrypted_prediction)[0]
    prediction = np.argmax(prediction_proba)

    if verbose:
        # Fixed: the sample index was not interpolated in the original message
        print(f"for {i}-th input, {prediction=} with expected {y_test[i]}")

    # Measure accuracy (explicit int keeps nb_good a plain Python integer)
    nb_good += int(y_test[i] == prediction)

print(f"Accuracy on {nb_samples} samples is {nb_good / nb_samples}")