Spaces: Running on CPU Upgrade
Upload app.py
app.py CHANGED
@@ -18,7 +18,7 @@ import time
 import pandas as pd
 import pickle
 import numpy as np
-
+import pdb
 # This repository's directory
 REPO_DIR = Path(__file__).parent
 subprocess.Popen(["uvicorn", "server:app"], cwd=REPO_DIR)
@@ -82,38 +82,50 @@ def keygen():
     return [list(evaluation_key)[:ENCRYPTED_DATA_BROWSER_LIMIT], user_id]
 
 
-def encode_quantize_encrypt(test_file,
-
-
+def encode_quantize_encrypt(test_file, eval_key):
+    ugly = ['Machine', 'SizeOfOptionalHeader', 'Characteristics',
+            'MajorLinkerVersion', 'MinorLinkerVersion', 'SizeOfCode',
+            'SizeOfInitializedData', 'SizeOfUninitializedData',
+            'AddressOfEntryPoint', 'BaseOfCode', 'BaseOfData', 'ImageBase',
+            'SectionAlignment', 'FileAlignment', 'MajorOperatingSystemVersion',
+            'MinorOperatingSystemVersion', 'MajorImageVersion', 'MinorImageVersion',
+            'MajorSubsystemVersion', 'MinorSubsystemVersion', 'SizeOfImage',
+            'SizeOfHeaders', 'CheckSum', 'Subsystem', 'DllCharacteristics',
+            'SizeOfStackReserve', 'SizeOfStackCommit', 'SizeOfHeapReserve',
+            'SizeOfHeapCommit', 'LoaderFlags', 'NumberOfRvaAndSizes', 'SectionsNb',
+            'SectionsMeanEntropy', 'SectionsMinEntropy', 'SectionsMaxEntropy',
+            'SectionsMeanRawsize', 'SectionsMinRawsize',
+            'SectionsMeanVirtualsize', 'SectionsMinVirtualsize',
+            'SectionMaxVirtualsize', 'ImportsNbDLL', 'ImportsNb',
+            'ImportsNbOrdinal', 'ExportNb', 'ResourcesNb', 'ResourcesMeanEntropy',
+            'ResourcesMinEntropy', 'ResourcesMaxEntropy', 'ResourcesMeanSize',
+            'ResourcesMinSize', 'ResourcesMaxSize', 'LoadConfigurationSize',
+            'VersionInformationSize']
+
+    fhe_api = FHEModelClient(f"fhe_model", f".fhe_keys/{eval_key}")
     fhe_api.load()
     from PE_main import extract_infos
-
-
+    # expect [1, 53] but we get (53)
+    # pdb.set_trace()
+    # features = pickle.loads(open(os.path.join("features.pkl"), "rb").read())
     encodings = extract_infos(test_file)
-
+
+    encodings = list(map(lambda x: encodings[x], ugly))
+
+    encodings = np.array(encodings).reshape(1, -1)
 
     quantized_encodings = fhe_api.model.quantize_input(encodings).astype(numpy.uint8)
     encrypted_quantized_encoding = fhe_api.quantize_encrypt_serialize(encodings)
 
-    # Save encrypted_quantized_encoding in a file, since too large to pass through regular Gradio
-    # buttons, https://github.com/gradio-app/gradio/issues/1877
     numpy.save(
-        f"tmp/tmp_encrypted_quantized_encoding_{
+        f"tmp/tmp_encrypted_quantized_encoding_{eval_key[1]}.npy",
         encrypted_quantized_encoding,
     )
 
     # Compute size
-    encrypted_quantized_encoding_shorten = list(encrypted_quantized_encoding)[
-
-    ]
-    encrypted_quantized_encoding_shorten_hex = "".join(
-        f"{i:02x}" for i in encrypted_quantized_encoding_shorten
-    )
-    return (
-        encodings[0],
-        quantized_encodings[0],
-        encrypted_quantized_encoding_shorten_hex,
-    )
+    encrypted_quantized_encoding_shorten = list(encrypted_quantized_encoding)[:ENCRYPTED_DATA_BROWSER_LIMIT]
+    encrypted_quantized_encoding_shorten_hex = "".join(f"{i:02x}" for i in encrypted_quantized_encoding_shorten)
+    return (encodings[0], quantized_encodings[0], encrypted_quantized_encoding_shorten_hex)
 
 
 def run_fhe(user_id):
@@ -124,9 +136,8 @@ def run_fhe(user_id):
     evaluation_key = numpy.load(f"tmp/tmp_evaluation_key_{user_id}.npy")
 
     # Use base64 to encode the encodings and evaluation key
-    encrypted_quantized_encoding = base64.b64encode(
-
-    ).decode()
+    encrypted_quantized_encoding = base64.b64encode(encrypted_quantized_encoding).decode()
+
     encoded_evaluation_key = base64.b64encode(evaluation_key).decode()
 
     query = {}
@@ -134,23 +145,19 @@ def run_fhe(user_id):
     query["encrypted_encoding"] = encrypted_quantized_encoding
     headers = {"Content-type": "application/json"}
 
-
     response = requests.post(
         "http://localhost:8000/predict",
         data=json.dumps(query),
         headers=headers,
     )
 
-
     encrypted_prediction = base64.b64decode(response.json()["encrypted_prediction"])
 
     numpy.save(f"tmp/tmp_encrypted_prediction_{user_id}.npy", encrypted_prediction)
-
-
-
-    encrypted_prediction_shorten_hex
-        f"{i:02x}" for i in encrypted_prediction_shorten
-    )
+
+    encrypted_prediction_shorten = list(encrypted_prediction)[:ENCRYPTED_DATA_BROWSER_LIMIT]
+    encrypted_prediction_shorten_hex = "".join(f"{i:02x}" for i in encrypted_prediction_shorten)
+    return encrypted_prediction_shorten_hex
 
 
 def decrypt_prediction(user_id):
@@ -167,20 +174,23 @@ def decrypt_prediction(user_id):
     fhe_api.generate_private_and_evaluation_keys(force=False)
 
     predictions = fhe_api.deserialize_decrypt_dequantize(encrypted_prediction)
+    return predictions
 
 
 
 def process_pipeline(test_file):
+
     eval_key = keygen()
     encodings = encode_quantize_encrypt(test_file, eval_key)
-    encrypted_quantized_encoding = run_fhe(
-    encrypted_prediction = decrypt_prediction(
+    encrypted_quantized_encoding = run_fhe(eval_key[1])
+    encrypted_prediction = decrypt_prediction(eval_key[1])
 
     return eval_key, encodings, encrypted_quantized_encoding, encrypted_prediction
 
 if __name__ == "__main__":
-
+
     app = gr.Interface(
+
         fn=process_pipeline,
         inputs=[
             gr.File(label="Test File"),
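For context on the main change in encode_quantize_encrypt: extract_infos() from PE_main returns one value per PE-header feature, while the model expects a single row of 53 features in a fixed order, hence the `# expect [1, 53] but we get (53)` comment. Below is a minimal sketch of that reorder-and-reshape step, assuming extract_infos returns a dict keyed by the feature names in the `ugly` list; the sample values and the short feature list are made up for illustration.

```python
import numpy as np

# Hypothetical, simplified stand-in for PE_main.extract_infos: a dict of
# PE-header features keyed by name (only a few of the 53 shown here).
extracted = {"Machine": 332, "SizeOfOptionalHeader": 224, "Characteristics": 258}

# Fixed feature order expected by the model (the full list is `ugly` in app.py).
feature_order = ["Machine", "SizeOfOptionalHeader", "Characteristics"]

# Same pattern as the commit: pick values in a fixed order, then reshape the
# flat vector of shape (n,) into a single-sample batch of shape (1, n).
row = [extracted[name] for name in feature_order]
encodings = np.array(row).reshape(1, -1)
print(encodings.shape)  # (1, 3) here; (1, 53) with the full feature list
```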
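And a short usage sketch of how the updated client-side functions chain together, mirroring process_pipeline() in the diff: keygen() returns [shortened_evaluation_key, user_id], so eval_key[1] is the user id that run_fhe() and decrypt_prediction() use to locate the tmp/ files written along the way. Function names come from the diff; the sample file path is made up.

```python
# Hypothetical end-to-end run of the client pipeline defined in app.py.
eval_key = keygen()                      # [shortened_evaluation_key, user_id]
user_id = eval_key[1]

# Extract, reorder, quantize and encrypt the PE features of the input file.
encodings, quantized, encrypted_preview = encode_quantize_encrypt("sample.exe", eval_key)

# Send the encrypted encoding to the local FastAPI server (uvicorn server:app)
# for the FHE prediction, then decrypt the result with the client's private key.
encrypted_prediction_preview = run_fhe(user_id)
predictions = decrypt_prediction(user_id)
print(predictions)
```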