micpst committed
Commit 5451fa1 · 1 Parent(s): 07d84bc

add source files

.fhe_key/.gitkeep ADDED
File without changes
.gitignore ADDED
@@ -0,0 +1,6 @@
+ tmp/
+ .venv
+ .fhe_keys
+ *.pyc
+ local_datasets/
+ .vscode/
app.py ADDED
@@ -0,0 +1,196 @@
+ """A Gradio app that runs locally (share=False) for CAN bus intrusion detection with FHE."""
+
+ import numpy as np
+ import gradio as gr
+ from concrete.ml.deployment import FHEModelClient
+ import numpy
+ import os
+ from pathlib import Path
+
+ import shutil
+ import torch
+
+ from model import Autoencoder
+ from concrete.ml.torch.compile import compile_torch_model
+
+ sequence_length = 50
+ input_size = 12
+ latent_size = 8
+ hidden_size = 64
+
+ ae_model = Autoencoder(
+     input_size=input_size,
+     hidden_size=hidden_size,
+     latent_size=latent_size,
+     sequence_length=sequence_length,
+     num_lstm_layers=1,
+ )
+
+ encoder = ae_model.encoder
+ encoder.load_state_dict(torch.load("deployment/encoder.pth", weights_only=True))
+
+ decoder = ae_model.decoder
+ decoder.load_state_dict(torch.load("deployment/decoder.pth", weights_only=True))
+
+ criterion = torch.nn.MSELoss()
+
+ dummy_input = torch.randn(1, latent_size)
+ compiled_decoder = compile_torch_model(
+     decoder,
+     dummy_input.numpy(),
+     n_bits=6,
+     rounding_threshold_bits={"n_bits": 6, "method": "approximate"},
+ )
+
+ # Encrypted data limit for the browser to display
+ # (encrypted data is too large to display in the browser)
+ ENCRYPTED_DATA_BROWSER_LIMIT = 100
+ N_USER_KEY_STORED = 20
+ FHE_MODEL_PATH = "deployment"
+
+ def clean_tmp_directory():
+     # Allow 20 user keys to be stored.
+     # Once that limit is reached, delete the oldest.
+     path_sub_directories = sorted([f for f in Path(".fhe_keys/").iterdir() if f.is_dir()], key=os.path.getmtime)
+
+     user_ids = []
+     if len(path_sub_directories) > N_USER_KEY_STORED:
+         n_files_to_delete = len(path_sub_directories) - N_USER_KEY_STORED
+         for p in path_sub_directories[:n_files_to_delete]:
+             user_ids.append(p.name)
+             shutil.rmtree(p)
+
+     list_files_tmp = Path("tmp/").iterdir()
+     # Delete all files related to user_id
+     for file in list_files_tmp:
+         for user_id in user_ids:
+             if file.name.endswith(f"{user_id}.npy"):
+                 file.unlink()
+
+
+ def keygen():
+     # Clean tmp directory if needed
+     clean_tmp_directory()
+
+     print("Initializing FHEModelClient...")
+
+     # Let's create a user_id
+     user_id = numpy.random.randint(0, 2**32)
+     fhe_api = FHEModelClient(FHE_MODEL_PATH, f".fhe_keys/{user_id}")
+     fhe_api.load()
+
+     # Generate a fresh key
+     fhe_api.generate_private_and_evaluation_keys(force=True)
+     evaluation_key = fhe_api.get_serialized_evaluation_keys()
+
+     # Save evaluation_key in a file, since too large to pass through regular Gradio
+     # buttons, https://github.com/gradio-app/gradio/issues/1877
+     numpy.save(f"tmp/tmp_evaluation_key_{user_id}.npy", evaluation_key)
+
+     return [list(evaluation_key)[:ENCRYPTED_DATA_BROWSER_LIMIT], user_id]
+
+
+ def run_fhe(packets_ids, threshold=0.05):
+     int_values = np.array([int(h[0], 16) for h in packets_ids.split(" ")])
+     binary_rep = np.array([list(bin(x)[2:].zfill(12)) for x in int_values])
+     packets_ids = binary_rep.astype(float)
+     packets_ids = torch.tensor(packets_ids).unsqueeze(0).float()
+
+     latent = encoder(packets_ids)
+
+     with torch.no_grad():  # Disable gradient computation for validation
+         decrypted_output = compiled_decoder.forward(latent.detach().numpy(), fhe="simulate")
+
+     decrypted_output = torch.tensor(decrypted_output).view(
+         -1, ae_model.sequence_length, packets_ids.size(2)
+     )
+
+     loss = criterion(decrypted_output, packets_ids)
+     pred = loss.item() > threshold
+
+     return [loss.item(), pred]
+
+
+ demo = gr.Blocks()
+
+ with demo:
+     gr.Markdown(
+         """
+         <h1 align="center">CAN Bus Intrusion Detection With FHE</h1>
+
+         <p align="center">
+         <img src="https://ars.els-cdn.com/content/image/1-s2.0-S0167404824000786-gr001_lrg.jpg" width="60%" height="60%">
+         </p>
+         """
+     )
+
+     gr.Markdown("## Step 1: Generate the keys")
+
+     b_gen_key_and_install = gr.Button("Get the keys")
+
+     evaluation_key = gr.Textbox(
+         label="Evaluation key (truncated):",
+         max_lines=1,
+         interactive=False,
+     )
+
+     user_id = gr.Textbox(
+         label="",
+         max_lines=1,
+         interactive=False,
+         visible=False,
+     )
+
+     gr.Markdown(
+         """
+         ## Step 2: Provide the packet IDs
+         Enter a sequence of sensitive electronic control unit (ECU) communication packet IDs from the in-vehicle network (IVN).
+         """
+     )
+
+     packets_ids = gr.Textbox(
+         label="Packet IDs",
+         info="Enter a sequence of 50 packet IDs separated by spaces",
+         max_lines=1,
+     )
+     gr.Examples(
+         label="Attack-free traffic",
+         examples=[
+             "316 18F 260 2A0 329 545 002 153 2C0 130 131 140 350 43F 370 440 316 18F 260 2A0 329 4F0 545 430 4B1 1F1 153 002 2C0 350 130 131 140 370 43F 440 5F0 18F 260 2A0 316 329 545 002 153 2C0 130 131 140 350",
+             "329 4F0 545 430 4B1 1F1 153 002 2C0 350 130 131 140 370 43F 440 5F0 18F 260 2A0 316 329 545 002 153 2C0 130 131 140 350 43F 370 0A0 0A1 440 316 18F 260 2A0 329 4F0 545 430 4B1 1F1 153 002 2C0 350 130",
+             "316 329 545 002 153 2C0 130 131 140 350 43F 370 0A0 0A1 440 316 18F 260 2A0 329 4F0 545 430 4B1 1F1 153 002 2C0 350 130 131 140 370 43F 440 316 18F 260 2A0 329 545 002 153 2C0 130 131 140 350 43F 370",
+         ],
+         inputs=[packets_ids],
+     )
+     gr.Examples(
+         label="DoS attacks",
+         examples=[
+             "130 131 140 370 43F 440 316 18F 260 2A0 329 545 002 153 2C0 130 131 140 350 43F 370 440 316 18F 260 2A0 329 545 4F0 430 2C0 4B1 1F1 153 002 350 000 130 000 131 000 140 000 370 000 43F 000 440 000 000",
+             "370 440 316 18F 260 2A0 329 545 4F0 430 2C0 4B1 1F1 153 002 350 000 130 000 131 000 140 000 370 000 43F 000 440 000 000 000 18F 000 260 000 2A0 000 316 000 329 000 545 000 000 000 000 000 002 000 153",
+             "000 140 000 370 000 43F 000 440 000 000 000 18F 000 260 000 2A0 000 316 000 329 000 545 000 000 000 000 000 002 000 153 000 2C0 000 130 000 131 000 140 000 350 000 370 000 43F 000 440 000 000 000 316",
+         ],
+         inputs=[packets_ids],
+     )
+
+     gr.Markdown(
+         """
+         ## Step 3: Detect the attack using FHE
+         """
+     )
+     threshold = gr.Slider(0, 1, value=0.05, label="Threshold", info="Choose a value between 0 and 1")
+     b_detect = gr.Button("Detect", variant="primary")
+     prediction = gr.Textbox(
+         label="Prediction",
+         max_lines=1,
+         interactive=False,
+     )
+     loss = gr.Textbox(
+         label="Loss",
+         max_lines=1,
+         interactive=False,
+     )
+
+     b_gen_key_and_install.click(keygen, inputs=[], outputs=[evaluation_key, user_id])
+     b_detect.click(run_fhe, inputs=[packets_ids, threshold], outputs=[loss, prediction])
+
+ demo.launch(share=False)
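
Note on app.py: run_fhe evaluates the compiled decoder with fhe="simulate", so the hosted demo exercises the quantized FHE circuit without actual encryption, while deployment/client.zip and deployment/server.zip committed here are the artifacts for real encrypted inference. The sketch below is only an illustration of what that client/server flow could look like with this commit's files; it assumes the standard Concrete ML deployment API (FHEModelServer, quantize_encrypt_serialize, deserialize_decrypt_dequantize), a locally writable .fhe_keys/demo key directory, and the same preprocessing as run_fhe. It is not part of this commit.

# Illustrative only: encrypted client/server inference with the committed
# deployment/ artifacts, assuming the standard Concrete ML deployment API.
import numpy as np
import torch
from concrete.ml.deployment import FHEModelClient, FHEModelServer

from model import Autoencoder

# The clear-text LSTM encoder runs on the client, as in app.py
ae = Autoencoder(input_size=12, hidden_size=64, latent_size=8, sequence_length=50)
ae.encoder.load_state_dict(torch.load("deployment/encoder.pth", weights_only=True))

client = FHEModelClient("deployment", ".fhe_keys/demo")  # assumed local key directory
client.load()
client.generate_private_and_evaluation_keys(force=True)
evaluation_keys = client.get_serialized_evaluation_keys()

# Same preprocessing as run_fhe() in app.py, on the first attack-free example
ids = "316 18F 260 2A0 329 545 002 153 2C0 130 131 140 350 43F 370 440 316 18F 260 2A0 329 4F0 545 430 4B1 1F1 153 002 2C0 350 130 131 140 370 43F 440 5F0 18F 260 2A0 316 329 545 002 153 2C0 130 131 140 350".split(" ")
bits = np.array([list(bin(int(h[0], 16))[2:].zfill(12)) for h in ids]).astype(float)
x = torch.tensor(bits, dtype=torch.float32).unsqueeze(0)  # shape (1, 50, 12)
with torch.no_grad():
    latent = ae.encoder(x).numpy()

# Client encrypts the latent vector; server runs the decoder under FHE
encrypted_latent = client.quantize_encrypt_serialize(latent)
server = FHEModelServer("deployment")
server.load()
encrypted_result = server.run(encrypted_latent, evaluation_keys)

# Client decrypts and applies the same MSE threshold as the app (0.05)
decoded = torch.tensor(
    client.deserialize_decrypt_dequantize(encrypted_result), dtype=torch.float32
).view(1, 50, 12)
loss = torch.nn.functional.mse_loss(decoded, x).item()
print("attack detected" if loss > 0.05 else "normal traffic", loss)

Alternatively, changing the string in run_fhe from "simulate" to "execute" should run the compiled circuit with encryption in-process, at the cost of key generation and much slower inference.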
deployment/.DS_Store ADDED
Binary file (6.15 kB)
deployment/client.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39a23f85f7f98e4b04114c89d97ab72713c7a55052eb506d204e95d47ecd706b
+ size 15847
deployment/decoder.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea6aa48a27e62b2e104e7f4bf947fd568b4e5fcda40b5b8386152d5641f3ac95
+ size 315743
deployment/encoder.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d0db46ddd2ac49415e8188db003ef0c8f65909e6a0707315c49e5d0ada60947
+ size 83799
deployment/server.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01729e3961179d0368cab360afaae330ea672852084da483921db56adf0e496d
+ size 26588
model.py ADDED
@@ -0,0 +1,62 @@
+ import torch.nn as nn
+
+
+ class Encoder(nn.Module):
+     def __init__(self, input_size, hidden_size, latent_size, num_lstm_layers):
+         super(Encoder, self).__init__()
+
+         self.encoder_lstm = nn.LSTM(
+             input_size, hidden_size, num_lstm_layers, batch_first=True
+         )
+         self.latent = nn.Linear(hidden_size, latent_size)
+
+     def forward(self, x):
+         lstm_out, (h_n, c_n) = self.encoder_lstm(x)
+         h_last = lstm_out[:, -1, :]
+         latent = self.latent(h_last)
+
+         return latent
+
+     def encode(self, x):
+         lstm_out, _ = self.encoder_lstm(x)
+         h_last = lstm_out[:, -1, :]
+         latent = self.latent(h_last)
+         return latent
+
+
+ class Decoder(nn.Module):
+     def __init__(self, input_size, latent_size, sequence_length):
+         super(Decoder, self).__init__()
+
+         self.sequence_length = sequence_length
+
+         self.decoder_mlp = nn.Sequential(
+             nn.Linear(latent_size, 128),
+             nn.ReLU(),
+             nn.Linear(128, input_size * sequence_length),
+         )
+
+     def forward(self, x):
+
+         decoded = self.decoder_mlp(x)
+         return decoded
+
+
+ class Autoencoder(nn.Module):
+     def __init__(
+         self, input_size, hidden_size, latent_size, sequence_length, num_lstm_layers=1
+     ):
+         super(Autoencoder, self).__init__()
+
+         self.sequence_length = sequence_length
+         self.hidden_size = hidden_size
+
+         self.encoder = Encoder(input_size, hidden_size, latent_size, num_lstm_layers)
+         self.decoder = Decoder(input_size, latent_size, sequence_length)
+
+     def forward(self, x):
+         latent = self.encoder(x)
+         decoded = self.decoder(latent)
+         decoded = decoded.view(-1, self.sequence_length, x.size(2))
+
+         return decoded
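
Note on model.py: only the MLP Decoder is compiled to an FHE circuit in app.py; the LSTM Encoder runs in the clear on the client and only its 8-dimensional latent vector reaches the decoder. Below is a quick, illustrative shape check under the hyperparameters used in app.py; the random input is just a stand-in for a preprocessed 50-packet window of 12-bit CAN IDs.

# Illustrative shape check of model.py with the hyperparameters from app.py.
import torch

from model import Autoencoder

ae = Autoencoder(input_size=12, hidden_size=64, latent_size=8, sequence_length=50, num_lstm_layers=1)

x = torch.rand(4, 50, 12)   # (batch, sequence_length, input_size)
latent = ae.encoder(x)      # LSTM -> last hidden state -> Linear(64, 8)
decoded = ae(x)             # latent -> MLP decoder -> reshaped reconstruction

print(latent.shape)         # torch.Size([4, 8])
print(decoded.shape)        # torch.Size([4, 50, 12])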
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ concrete-ml==1.6.1
+ gradio