Michael Ramos committed on
Commit
bd0f01a
1 Parent(s): 10713a6

schema revision, add sample

Browse files
frameworks/dasf/controls.json CHANGED
@@ -2,2134 +2,478 @@
2
  {
3
  "controlId": "DASF-1",
4
  "title": "SSO with IdP and MFA",
5
- "risks": [
6
- {
7
- "component": "Raw Data",
8
- "identifier": "1.1",
9
- "riskId": "raw-data-1.1",
10
- "summary": "Raw Data Risk 1.1"
11
- },
12
- {
13
- "component": "Data Prep",
14
- "identifier": "2.1",
15
- "riskId": "data-prep-2.1",
16
- "summary": "Data Prep Risk 2.1"
17
- },
18
- {
19
- "component": "Data Prep",
20
- "identifier": "2.2",
21
- "riskId": "data-prep-2.2",
22
- "summary": "Data Prep Risk 2.2"
23
- },
24
- {
25
- "component": "Data Prep",
26
- "identifier": "2.3",
27
- "riskId": "data-prep-2.3",
28
- "summary": "Data Prep Risk 2.3"
29
- },
30
- {
31
- "component": "Data Prep",
32
- "identifier": "2.4",
33
- "riskId": "data-prep-2.4",
34
- "summary": "Data Prep Risk 2.4"
35
- },
36
- {
37
- "component": "Datasets",
38
- "identifier": "3.1",
39
- "riskId": "datasets-3.1",
40
- "summary": "Datasets Risk 3.1"
41
- },
42
- {
43
- "component": "Evaluation",
44
- "identifier": "6.1",
45
- "riskId": "evaluation-6.1",
46
- "summary": "Evaluation Risk 6.1"
47
- },
48
- {
49
- "component": "Model",
50
- "identifier": "7.1",
51
- "riskId": "model-7.1",
52
- "summary": "Model Risk 7.1"
53
- },
54
- {
55
- "component": "Model",
56
- "identifier": "7.2",
57
- "riskId": "model-7.2",
58
- "summary": "Model Risk 7.2"
59
- },
60
- {
61
- "component": "Model Management",
62
- "identifier": "8.2",
63
- "riskId": "model-management-8.2",
64
- "summary": "Model Management Risk 8.2"
65
- },
66
- {
67
- "component": "Model Management",
68
- "identifier": "8.4",
69
- "riskId": "model-management-8.4",
70
- "summary": "Model Management Risk 8.4"
71
- },
72
- {
73
- "component": "Model Serving \u2014 Inference Requests",
74
- "identifier": "9.1",
75
- "riskId": "model-serving-inference-requests-9.1",
76
- "summary": "Model Serving \u2014 Inference Requests Risk 9.1"
77
- },
78
- {
79
- "component": "Model Serving \u2014 Inference Requests",
80
- "identifier": "9.2",
81
- "riskId": "model-serving-inference-requests-9.2",
82
- "summary": "Model Serving \u2014 Inference Requests Risk 9.2"
83
- },
84
- {
85
- "component": "Model Serving \u2014 Inference Requests",
86
- "identifier": "9.5",
87
- "riskId": "model-serving-inference-requests-9.5",
88
- "summary": "Model Serving \u2014 Inference Requests Risk 9.5"
89
- },
90
- {
91
- "component": "Model Serving \u2014 Inference Requests",
92
- "identifier": "9.6",
93
- "riskId": "model-serving-inference-requests-9.6",
94
- "summary": "Model Serving \u2014 Inference Requests Risk 9.6"
95
- },
96
- {
97
- "component": "Model Serving \u2014 Inference Requests",
98
- "identifier": "9.7",
99
- "riskId": "model-serving-inference-requests-9.7",
100
- "summary": "Model Serving \u2014 Inference Requests Risk 9.7"
101
- },
102
- {
103
- "component": "Model Serving \u2014 Inference Requests",
104
- "identifier": "9.9",
105
- "riskId": "model-serving-inference-requests-9.9",
106
- "summary": "Model Serving \u2014 Inference Requests Risk 9.9"
107
- },
108
- {
109
- "component": "Model Serving \u2014 Inference Requests",
110
- "identifier": "9.10",
111
- "riskId": "model-serving-inference-requests-9.10",
112
- "summary": "Model Serving \u2014 Inference Requests Risk 9.10"
113
- },
114
- {
115
- "component": "Model Serving \u2014 Inference Response",
116
- "identifier": "10.3",
117
- "riskId": "model-serving-inference-response-10.3",
118
- "summary": "Model Serving \u2014 Inference Response Risk 10.3"
119
- },
120
- {
121
- "component": "Model Serving \u2014 Inference Response",
122
- "identifier": "10.4",
123
- "riskId": "model-serving-inference-response-10.4",
124
- "summary": "Model Serving \u2014 Inference Response Risk 10.4"
125
- }
126
- ],
127
  "description": "Implementing single sign-on with an identity provider\u2019s (IdP) multi-factor authentication is critical for secure authentication. It adds an extra layer of security, ensuring that only authorized users access the Databricks Platform.",
128
  "controlCategory": "Configuration",
129
- "readableControlId": "DASF 1"
 
 
130
  },
131
  {
132
  "controlId": "DASF-2",
133
  "title": "Sync users and groups",
134
- "risks": [
135
- {
136
- "component": "Raw Data",
137
- "identifier": "1.1",
138
- "riskId": "raw-data-1.1",
139
- "summary": "Raw Data Risk 1.1"
140
- },
141
- {
142
- "component": "Data Prep",
143
- "identifier": "2.1",
144
- "riskId": "data-prep-2.1",
145
- "summary": "Data Prep Risk 2.1"
146
- },
147
- {
148
- "component": "Data Prep",
149
- "identifier": "2.2",
150
- "riskId": "data-prep-2.2",
151
- "summary": "Data Prep Risk 2.2"
152
- },
153
- {
154
- "component": "Data Prep",
155
- "identifier": "2.3",
156
- "riskId": "data-prep-2.3",
157
- "summary": "Data Prep Risk 2.3"
158
- },
159
- {
160
- "component": "Data Prep",
161
- "identifier": "2.4",
162
- "riskId": "data-prep-2.4",
163
- "summary": "Data Prep Risk 2.4"
164
- },
165
- {
166
- "component": "Datasets",
167
- "identifier": "3.1",
168
- "riskId": "datasets-3.1",
169
- "summary": "Datasets Risk 3.1"
170
- },
171
- {
172
- "component": "Evaluation",
173
- "identifier": "6.1",
174
- "riskId": "evaluation-6.1",
175
- "summary": "Evaluation Risk 6.1"
176
- },
177
- {
178
- "component": "Model",
179
- "identifier": "7.2",
180
- "riskId": "model-7.2",
181
- "summary": "Model Risk 7.2"
182
- },
183
- {
184
- "component": "Model Management",
185
- "identifier": "8.2",
186
- "riskId": "model-management-8.2",
187
- "summary": "Model Management Risk 8.2"
188
- },
189
- {
190
- "component": "Model Management",
191
- "identifier": "8.4",
192
- "riskId": "model-management-8.4",
193
- "summary": "Model Management Risk 8.4"
194
- },
195
- {
196
- "component": "Model Serving \u2014 Inference Requests",
197
- "identifier": "9.1",
198
- "riskId": "model-serving-inference-requests-9.1",
199
- "summary": "Model Serving \u2014 Inference Requests Risk 9.1"
200
- },
201
- {
202
- "component": "Model Serving \u2014 Inference Requests",
203
- "identifier": "9.2",
204
- "riskId": "model-serving-inference-requests-9.2",
205
- "summary": "Model Serving \u2014 Inference Requests Risk 9.2"
206
- },
207
- {
208
- "component": "Model Serving \u2014 Inference Requests",
209
- "identifier": "9.5",
210
- "riskId": "model-serving-inference-requests-9.5",
211
- "summary": "Model Serving \u2014 Inference Requests Risk 9.5"
212
- },
213
- {
214
- "component": "Model Serving \u2014 Inference Requests",
215
- "identifier": "9.6",
216
- "riskId": "model-serving-inference-requests-9.6",
217
- "summary": "Model Serving \u2014 Inference Requests Risk 9.6"
218
- },
219
- {
220
- "component": "Model Serving \u2014 Inference Requests",
221
- "identifier": "9.7",
222
- "riskId": "model-serving-inference-requests-9.7",
223
- "summary": "Model Serving \u2014 Inference Requests Risk 9.7"
224
- },
225
- {
226
- "component": "Model Serving \u2014 Inference Requests",
227
- "identifier": "9.9",
228
- "riskId": "model-serving-inference-requests-9.9",
229
- "summary": "Model Serving \u2014 Inference Requests Risk 9.9"
230
- },
231
- {
232
- "component": "Model Serving \u2014 Inference Requests",
233
- "identifier": "9.10",
234
- "riskId": "model-serving-inference-requests-9.10",
235
- "summary": "Model Serving \u2014 Inference Requests Risk 9.10"
236
- },
237
- {
238
- "component": "Model Serving \u2014 Inference Response",
239
- "identifier": "10.3",
240
- "riskId": "model-serving-inference-response-10.3",
241
- "summary": "Model Serving \u2014 Inference Response Risk 10.3"
242
- },
243
- {
244
- "component": "Model Serving \u2014 Inference Response",
245
- "identifier": "10.4",
246
- "riskId": "model-serving-inference-response-10.4",
247
- "summary": "Model Serving \u2014 Inference Response Risk 10.4"
248
- }
249
- ],
250
  "description": "Synchronizing users and groups from your identity provider (IdP) with Databricks using the SCIM standard facilitates consistent and automated user provisioning for enhancing security.",
251
  "controlCategory": "Configuration",
252
- "readableControlId": "DASF 2"
 
 
253
  },
254
  {
255
  "controlId": "DASF-3",
256
  "title": "Restrict access using IP access lists",
257
- "risks": [
258
- {
259
- "component": "Raw Data",
260
- "identifier": "1.1",
261
- "riskId": "raw-data-1.1",
262
- "summary": "Raw Data Risk 1.1"
263
- },
264
- {
265
- "component": "Data Prep",
266
- "identifier": "2.1",
267
- "riskId": "data-prep-2.1",
268
- "summary": "Data Prep Risk 2.1"
269
- },
270
- {
271
- "component": "Data Prep",
272
- "identifier": "2.2",
273
- "riskId": "data-prep-2.2",
274
- "summary": "Data Prep Risk 2.2"
275
- },
276
- {
277
- "component": "Data Prep",
278
- "identifier": "2.3",
279
- "riskId": "data-prep-2.3",
280
- "summary": "Data Prep Risk 2.3"
281
- },
282
- {
283
- "component": "Data Prep",
284
- "identifier": "2.4",
285
- "riskId": "data-prep-2.4",
286
- "summary": "Data Prep Risk 2.4"
287
- },
288
- {
289
- "component": "Datasets",
290
- "identifier": "3.1",
291
- "riskId": "datasets-3.1",
292
- "summary": "Datasets Risk 3.1"
293
- },
294
- {
295
- "component": "Evaluation",
296
- "identifier": "6.1",
297
- "riskId": "evaluation-6.1",
298
- "summary": "Evaluation Risk 6.1"
299
- },
300
- {
301
- "component": "Model",
302
- "identifier": "7.2",
303
- "riskId": "model-7.2",
304
- "summary": "Model Risk 7.2"
305
- },
306
- {
307
- "component": "Model Management",
308
- "identifier": "8.2",
309
- "riskId": "model-management-8.2",
310
- "summary": "Model Management Risk 8.2"
311
- },
312
- {
313
- "component": "Model Management",
314
- "identifier": "8.4",
315
- "riskId": "model-management-8.4",
316
- "summary": "Model Management Risk 8.4"
317
- },
318
- {
319
- "component": "Model Serving \u2014 Inference Requests",
320
- "identifier": "9.1",
321
- "riskId": "model-serving-inference-requests-9.1",
322
- "summary": "Model Serving \u2014 Inference Requests Risk 9.1"
323
- },
324
- {
325
- "component": "Model Serving \u2014 Inference Requests",
326
- "identifier": "9.2",
327
- "riskId": "model-serving-inference-requests-9.2",
328
- "summary": "Model Serving \u2014 Inference Requests Risk 9.2"
329
- },
330
- {
331
- "component": "Model Serving \u2014 Inference Requests",
332
- "identifier": "9.5",
333
- "riskId": "model-serving-inference-requests-9.5",
334
- "summary": "Model Serving \u2014 Inference Requests Risk 9.5"
335
- },
336
- {
337
- "component": "Model Serving \u2014 Inference Requests",
338
- "identifier": "9.6",
339
- "riskId": "model-serving-inference-requests-9.6",
340
- "summary": "Model Serving \u2014 Inference Requests Risk 9.6"
341
- },
342
- {
343
- "component": "Model Serving \u2014 Inference Requests",
344
- "identifier": "9.7",
345
- "riskId": "model-serving-inference-requests-9.7",
346
- "summary": "Model Serving \u2014 Inference Requests Risk 9.7"
347
- },
348
- {
349
- "component": "Model Serving \u2014 Inference Requests",
350
- "identifier": "9.9",
351
- "riskId": "model-serving-inference-requests-9.9",
352
- "summary": "Model Serving \u2014 Inference Requests Risk 9.9"
353
- },
354
- {
355
- "component": "Model Serving \u2014 Inference Requests",
356
- "identifier": "9.10",
357
- "riskId": "model-serving-inference-requests-9.10",
358
- "summary": "Model Serving \u2014 Inference Requests Risk 9.10"
359
- },
360
- {
361
- "component": "Model Serving \u2014 Inference Response",
362
- "identifier": "10.3",
363
- "riskId": "model-serving-inference-response-10.3",
364
- "summary": "Model Serving \u2014 Inference Response Risk 10.3"
365
- },
366
- {
367
- "component": "Model Serving \u2014 Inference Response",
368
- "identifier": "10.4",
369
- "riskId": "model-serving-inference-response-10.4",
370
- "summary": "Model Serving \u2014 Inference Response Risk 10.4"
371
- }
372
- ],
373
  "description": "Configure IP access lists to restrict authentication to Databricks from specific IP ranges, such as VPNs or office networks, and strengthen network security by preventing unauthorized access from untrusted locations.",
374
  "controlCategory": "Configuration",
375
- "readableControlId": "DASF 3"
 
 
376
  },
377
  {
378
  "controlId": "DASF-4",
379
  "title": "Restrict access using private link",
380
- "risks": [
381
- {
382
- "component": "Raw Data",
383
- "identifier": "1.1",
384
- "riskId": "raw-data-1.1",
385
- "summary": "Raw Data Risk 1.1"
386
- },
387
- {
388
- "component": "Data Prep",
389
- "identifier": "2.1",
390
- "riskId": "data-prep-2.1",
391
- "summary": "Data Prep Risk 2.1"
392
- },
393
- {
394
- "component": "Data Prep",
395
- "identifier": "2.2",
396
- "riskId": "data-prep-2.2",
397
- "summary": "Data Prep Risk 2.2"
398
- },
399
- {
400
- "component": "Data Prep",
401
- "identifier": "2.3",
402
- "riskId": "data-prep-2.3",
403
- "summary": "Data Prep Risk 2.3"
404
- },
405
- {
406
- "component": "Data Prep",
407
- "identifier": "2.4",
408
- "riskId": "data-prep-2.4",
409
- "summary": "Data Prep Risk 2.4"
410
- },
411
- {
412
- "component": "Datasets",
413
- "identifier": "3.1",
414
- "riskId": "datasets-3.1",
415
- "summary": "Datasets Risk 3.1"
416
- },
417
- {
418
- "component": "Evaluation",
419
- "identifier": "6.1",
420
- "riskId": "evaluation-6.1",
421
- "summary": "Evaluation Risk 6.1"
422
- },
423
- {
424
- "component": "Model",
425
- "identifier": "7.2",
426
- "riskId": "model-7.2",
427
- "summary": "Model Risk 7.2"
428
- },
429
- {
430
- "component": "Model Management",
431
- "identifier": "8.2",
432
- "riskId": "model-management-8.2",
433
- "summary": "Model Management Risk 8.2"
434
- },
435
- {
436
- "component": "Model Management",
437
- "identifier": "8.4",
438
- "riskId": "model-management-8.4",
439
- "summary": "Model Management Risk 8.4"
440
- },
441
- {
442
- "component": "Model Serving \u2014 Inference Requests",
443
- "identifier": "9.1",
444
- "riskId": "model-serving-inference-requests-9.1",
445
- "summary": "Model Serving \u2014 Inference Requests Risk 9.1"
446
- },
447
- {
448
- "component": "Model Serving \u2014 Inference Requests",
449
- "identifier": "9.2",
450
- "riskId": "model-serving-inference-requests-9.2",
451
- "summary": "Model Serving \u2014 Inference Requests Risk 9.2"
452
- },
453
- {
454
- "component": "Model Serving \u2014 Inference Requests",
455
- "identifier": "9.5",
456
- "riskId": "model-serving-inference-requests-9.5",
457
- "summary": "Model Serving \u2014 Inference Requests Risk 9.5"
458
- },
459
- {
460
- "component": "Model Serving \u2014 Inference Requests",
461
- "identifier": "9.6",
462
- "riskId": "model-serving-inference-requests-9.6",
463
- "summary": "Model Serving \u2014 Inference Requests Risk 9.6"
464
- },
465
- {
466
- "component": "Model Serving \u2014 Inference Requests",
467
- "identifier": "9.7",
468
- "riskId": "model-serving-inference-requests-9.7",
469
- "summary": "Model Serving \u2014 Inference Requests Risk 9.7"
470
- },
471
- {
472
- "component": "Model Serving \u2014 Inference Requests",
473
- "identifier": "9.9",
474
- "riskId": "model-serving-inference-requests-9.9",
475
- "summary": "Model Serving \u2014 Inference Requests Risk 9.9"
476
- },
477
- {
478
- "component": "Model Serving \u2014 Inference Requests",
479
- "identifier": "9.10",
480
- "riskId": "model-serving-inference-requests-9.10",
481
- "summary": "Model Serving \u2014 Inference Requests Risk 9.10"
482
- },
483
- {
484
- "component": "Model Serving \u2014 Inference Response",
485
- "identifier": "10.3",
486
- "riskId": "model-serving-inference-response-10.3",
487
- "summary": "Model Serving \u2014 Inference Response Risk 10.3"
488
- },
489
- {
490
- "component": "Model Serving \u2014 Inference Response",
491
- "identifier": "10.4",
492
- "riskId": "model-serving-inference-response-10.4",
493
- "summary": "Model Serving \u2014 Inference Response Risk 10.4"
494
- }
495
- ],
496
  "description": "Use AWS PrivateLink, Azure Private Link or GCP Private Service Connect to create a private network route between the customer and the Databricks control plane or the control plane and the customer\u2019s compute plane environments to enhance data security by avoiding public internet exposure.",
497
  "controlCategory": "Configuration",
498
- "readableControlId": "DASF 4"
 
 
499
  },
500
  {
501
  "controlId": "DASF-5",
502
  "title": "Control access to data and other objects",
503
- "risks": [
504
- {
505
- "component": "Raw Data",
506
- "identifier": "1.1",
507
- "riskId": "raw-data-1.1",
508
- "summary": "Raw Data Risk 1.1"
509
- },
510
- {
511
- "component": "Raw Data",
512
- "identifier": "1.4",
513
- "riskId": "raw-data-1.4",
514
- "summary": "Raw Data Risk 1.4"
515
- },
516
- {
517
- "component": "Data Prep",
518
- "identifier": "2.1",
519
- "riskId": "data-prep-2.1",
520
- "summary": "Data Prep Risk 2.1"
521
- },
522
- {
523
- "component": "Datasets",
524
- "identifier": "3.1",
525
- "riskId": "datasets-3.1",
526
- "summary": "Datasets Risk 3.1"
527
- },
528
- {
529
- "component": "Datasets",
530
- "identifier": "3.2",
531
- "riskId": "datasets-3.2",
532
- "summary": "Datasets Risk 3.2"
533
- },
534
- {
535
- "component": "Datasets",
536
- "identifier": "3.3",
537
- "riskId": "datasets-3.3",
538
- "summary": "Datasets Risk 3.3"
539
- },
540
- {
541
- "component": "Governance",
542
- "identifier": "4.1",
543
- "riskId": "governance-4.1",
544
- "summary": "Governance Risk 4.1"
545
- },
546
- {
547
- "component": "Evaluation",
548
- "identifier": "6.1",
549
- "riskId": "evaluation-6.1",
550
- "summary": "Evaluation Risk 6.1"
551
- },
552
- {
553
- "component": "Model",
554
- "identifier": "7.1",
555
- "riskId": "model-7.1",
556
- "summary": "Model Risk 7.1"
557
- },
558
- {
559
- "component": "Model",
560
- "identifier": "7.2",
561
- "riskId": "model-7.2",
562
- "summary": "Model Risk 7.2"
563
- },
564
- {
565
- "component": "Model Management",
566
- "identifier": "8.1",
567
- "riskId": "model-management-8.1",
568
- "summary": "Model Management Risk 8.1"
569
- },
570
- {
571
- "component": "Model Management",
572
- "identifier": "8.2",
573
- "riskId": "model-management-8.2",
574
- "summary": "Model Management Risk 8.2"
575
- },
576
- {
577
- "component": "Model Management",
578
- "identifier": "8.3",
579
- "riskId": "model-management-8.3",
580
- "summary": "Model Management Risk 8.3"
581
- },
582
- {
583
- "component": "Model Management",
584
- "identifier": "8.4",
585
- "riskId": "model-management-8.4",
586
- "summary": "Model Management Risk 8.4"
587
- },
588
- {
589
- "component": "Model Serving \u2014 Inference Requests",
590
- "identifier": "9.1",
591
- "riskId": "model-serving-inference-requests-9.1",
592
- "summary": "Model Serving \u2014 Inference Requests Risk 9.1"
593
- },
594
- {
595
- "component": "Model Serving \u2014 Inference Requests",
596
- "identifier": "9.2",
597
- "riskId": "model-serving-inference-requests-9.2",
598
- "summary": "Model Serving \u2014 Inference Requests Risk 9.2"
599
- },
600
- {
601
- "component": "Model Serving \u2014 Inference Requests",
602
- "identifier": "9.5",
603
- "riskId": "model-serving-inference-requests-9.5",
604
- "summary": "Model Serving \u2014 Inference Requests Risk 9.5"
605
- },
606
- {
607
- "component": "Model Serving \u2014 Inference Requests",
608
- "identifier": "9.6",
609
- "riskId": "model-serving-inference-requests-9.6",
610
- "summary": "Model Serving \u2014 Inference Requests Risk 9.6"
611
- },
612
- {
613
- "component": "Model Serving \u2014 Inference Requests",
614
- "identifier": "9.7",
615
- "riskId": "model-serving-inference-requests-9.7",
616
- "summary": "Model Serving \u2014 Inference Requests Risk 9.7"
617
- },
618
- {
619
- "component": "Model Serving \u2014 Inference Requests",
620
- "identifier": "9.9",
621
- "riskId": "model-serving-inference-requests-9.9",
622
- "summary": "Model Serving \u2014 Inference Requests Risk 9.9"
623
- },
624
- {
625
- "component": "Model Serving \u2014 Inference Requests",
626
- "identifier": "9.10",
627
- "riskId": "model-serving-inference-requests-9.10",
628
- "summary": "Model Serving \u2014 Inference Requests Risk 9.10"
629
- },
630
- {
631
- "component": "Model Serving \u2014 Inference Response",
632
- "identifier": "10.3",
633
- "riskId": "model-serving-inference-response-10.3",
634
- "summary": "Model Serving \u2014 Inference Response Risk 10.3"
635
- },
636
- {
637
- "component": "Model Serving \u2014 Inference Response",
638
- "identifier": "10.4",
639
- "riskId": "model-serving-inference-response-10.4",
640
- "summary": "Model Serving \u2014 Inference Response Risk 10.4"
641
- }
642
- ],
643
  "description": "Implementing Unity Catalog for unified permissions management and assets simplifies access control and enhances security.",
644
  "controlCategory": "Implementation",
645
- "readableControlId": "DASF 5"
 
 
646
  },
647
  {
648
  "controlId": "DASF-6",
649
  "title": "Classify data",
650
- "risks": [
651
- {
652
- "component": "Raw Data",
653
- "identifier": "1.2",
654
- "riskId": "raw-data-1.2",
655
- "summary": "Raw Data Risk 1.2"
656
- }
657
- ],
658
  "description": "Tags are attributes containing keys and optional values that you can apply to different securable objects in Unity Catalog. Organizing securable objects with tags in Unity Catalog aids in efficient data management, data discovery and classification, essential for handling large datasets.",
659
  "controlCategory": "Implementation",
660
- "readableControlId": "DASF 6"
 
 
661
  },
662
  {
663
  "controlId": "DASF-7",
664
  "title": "Enforce Data Quality Checks on Batch and Streaming Datasets",
665
- "risks": [
666
- {
667
- "component": "Raw Data",
668
- "identifier": "1.3",
669
- "riskId": "raw-data-1.3",
670
- "summary": "Raw Data Risk 1.3"
671
- },
672
- {
673
- "component": "Raw Data",
674
- "identifier": "1.9",
675
- "riskId": "raw-data-1.9",
676
- "summary": "Raw Data Risk 1.9"
677
- },
678
- {
679
- "component": "Data Prep",
680
- "identifier": "2.1",
681
- "riskId": "data-prep-2.1",
682
- "summary": "Data Prep Risk 2.1"
683
- },
684
- {
685
- "component": "Datasets",
686
- "identifier": "3.1",
687
- "riskId": "datasets-3.1",
688
- "summary": "Datasets Risk 3.1"
689
- },
690
- {
691
- "component": "Governance",
692
- "identifier": "4.1",
693
- "riskId": "governance-4.1",
694
- "summary": "Governance Risk 4.1"
695
- },
696
- {
697
- "component": "Evaluation",
698
- "identifier": "6.1",
699
- "riskId": "evaluation-6.1",
700
- "summary": "Evaluation Risk 6.1"
701
- }
702
- ],
703
  "description": "Databricks Delta Live Tables (DLT) simplifies ETL development with declarative pipelines that integrate quality control checks and performance monitoring.",
704
  "controlCategory": "Implementation",
705
- "readableControlId": "DASF 7"
 
 
706
  },
707
  {
708
  "controlId": "DASF-8",
709
  "title": "Encrypt data at rest",
710
- "risks": [
711
- {
712
- "component": "Raw Data",
713
- "identifier": "1.4",
714
- "riskId": "raw-data-1.4",
715
- "summary": "Raw Data Risk 1.4"
716
- },
717
- {
718
- "component": "Datasets",
719
- "identifier": "3.2",
720
- "riskId": "datasets-3.2",
721
- "summary": "Datasets Risk 3.2"
722
- },
723
- {
724
- "component": "Datasets",
725
- "identifier": "3.3",
726
- "riskId": "datasets-3.3",
727
- "summary": "Datasets Risk 3.3"
728
- }
729
- ],
730
  "description": "Databricks supports customer-managed encryption keys to strengthen data at rest protection and greater access control.",
731
  "controlCategory": "Configuration",
732
- "readableControlId": "DASF 8"
 
 
733
  },
734
  {
735
  "controlId": "DASF-9",
736
  "title": "Encrypt data in transit",
737
- "risks": [
738
- {
739
- "component": "Raw Data",
740
- "identifier": "1.4",
741
- "riskId": "raw-data-1.4",
742
- "summary": "Raw Data Risk 1.4"
743
- },
744
- {
745
- "component": "Datasets",
746
- "identifier": "3.2",
747
- "riskId": "datasets-3.2",
748
- "summary": "Datasets Risk 3.2"
749
- },
750
- {
751
- "component": "Datasets",
752
- "identifier": "3.3",
753
- "riskId": "datasets-3.3",
754
- "summary": "Datasets Risk 3.3"
755
- }
756
- ],
757
  "description": "Databricks supports TLS 1.2+ encryption to protect customer data during transit. This applies to data transfer between the customer and the Databricks control plane and within the compute plane. Customers can also secure inter-cluster communications within the compute plane per their security requirements.",
758
  "controlCategory": "Out-of-the-box",
759
- "readableControlId": "DASF 9"
 
 
760
  },
761
  {
762
  "controlId": "DASF-10",
763
  "title": "Version data",
764
- "risks": [
765
- {
766
- "component": "Raw Data",
767
- "identifier": "1.5",
768
- "riskId": "raw-data-1.5",
769
- "summary": "Raw Data Risk 1.5"
770
- },
771
- {
772
- "component": "Raw Data",
773
- "identifier": "1.7",
774
- "riskId": "raw-data-1.7",
775
- "summary": "Raw Data Risk 1.7"
776
- }
777
- ],
778
  "description": "Store data in a lakehouse architecture using Delta tables. Delta tables can be versioned to revert any user\u2019s or malicious actor\u2019s poisoning of data. Data can be stored in a lakehouse architecture in the customer\u2019s cloud account. Both raw data and feature tables are stored as Delta tables with access controls to determine who can read and modify them. Data lineage with UC helps track and audit changes and the origin of ML data sources. Each operation that modifies a Delta Lake table creates a new table version. User actions are tracked and audited, and lineage of transformations is available all in the same platform. You can use history information to audit operations, roll back a table or query a table at a specific point in time using time travel.",
779
  "controlCategory": "Implementation",
780
- "readableControlId": "DASF 10"
 
 
781
  },
782
  {
783
  "controlId": "DASF-11",
784
  "title": "Capture and view data lineage",
785
- "risks": [
786
- {
787
- "component": "Raw Data",
788
- "identifier": "1.6",
789
- "riskId": "raw-data-1.6",
790
- "summary": "Raw Data Risk 1.6"
791
- },
792
- {
793
- "component": "Data Prep",
794
- "identifier": "2.1",
795
- "riskId": "data-prep-2.1",
796
- "summary": "Data Prep Risk 2.1"
797
- },
798
- {
799
- "component": "Datasets",
800
- "identifier": "3.1",
801
- "riskId": "datasets-3.1",
802
- "summary": "Datasets Risk 3.1"
803
- },
804
- {
805
- "component": "Governance",
806
- "identifier": "4.1",
807
- "riskId": "governance-4.1",
808
- "summary": "Governance Risk 4.1"
809
- },
810
- {
811
- "component": "Evaluation",
812
- "identifier": "6.1",
813
- "riskId": "evaluation-6.1",
814
- "summary": "Evaluation Risk 6.1"
815
- }
816
- ],
817
  "description": "Unity Catalog tracks and visualizes real-time data lineage across all languages to the column level, providing a traceable history of an object from notebooks, workflows, models and dashboards. This enhances transparency and compliance, with accessibility provided through the Catalog Explorer.",
818
  "controlCategory": "Out-of-the-box",
819
- "readableControlId": "DASF 11"
 
 
820
  },
821
  {
822
  "controlId": "DASF-12",
823
  "title": "Delete records from datasets",
824
- "risks": [
825
- {
826
- "component": "Raw Data",
827
- "identifier": "1.8",
828
- "riskId": "raw-data-1.8",
829
- "summary": "Raw Data Risk 1.8"
830
- }
831
- ],
832
  "description": "Data governance in Delta Lake, the lakehouse storage layer, utilizes its atomicity, consistency, isolation, durability (ACID) properties for effective data management. This includes the capability to remove data based on specific predicates from a Delta Table, including the complete removal of data\u2019s history, supporting compliance with regulations like GDPR and CCPA.",
833
  "controlCategory": "Implementation",
834
- "readableControlId": "DASF 12"
 
 
835
  },
836
  {
837
  "controlId": "DASF-13",
838
  "title": "Use near real-time data",
839
- "risks": [
840
- {
841
- "component": "Raw Data",
842
- "identifier": "1.9",
843
- "riskId": "raw-data-1.9",
844
- "summary": "Raw Data Risk 1.9"
845
- }
846
- ],
847
  "description": "Use Databricks for near real-time data ingestion, processing, machine learning, and AI for streaming data.",
848
  "controlCategory": "Implementation",
849
- "readableControlId": "DASF 13"
 
 
850
  },
851
  {
852
  "controlId": "DASF-14",
853
  "title": "Audit actions performed on datasets",
854
- "risks": [
855
- {
856
- "component": "Raw Data",
857
- "identifier": "1.10",
858
- "riskId": "raw-data-1.10",
859
- "summary": "Raw Data Risk 1.10"
860
- },
861
- {
862
- "component": "Datasets",
863
- "identifier": "3.1",
864
- "riskId": "datasets-3.1",
865
- "summary": "Datasets Risk 3.1"
866
- }
867
- ],
868
  "description": "Databricks auditing, enhanced by Unity Catalog\u2019s events, delivers fine-grained visibility into data access and user activities. This is vital for robust data governance and security, especially in regulated industries. It enables organizations to proactively identify and manage overentitled users, enhancing data security and ensuring compliance.",
869
  "controlCategory": "Implementation",
870
- "readableControlId": "DASF 14"
 
 
871
  },
872
  {
873
  "controlId": "DASF-15",
874
  "title": "Explore datasets and identify problems",
875
- "risks": [
876
- {
877
- "component": "Data Prep",
878
- "identifier": "2.1",
879
- "riskId": "data-prep-2.1",
880
- "summary": "Data Prep Risk 2.1"
881
- }
882
- ],
883
  "description": "Iteratively explore, share and prep data for the machine learning lifecycle by creating reproducible, editable and shareable datasets, tables and visualizations. Within Databricks this EDA process can be accelerated with Mosaic AI AutoML. AutoML not only generates baseline models given a dataset, but also provides the underlying model training code in the form of a Python notebook. Notably for EDA, AutoML calculates summary statistics on the provided dataset, creating a notebook for the data scientist to review and adapt.",
884
  "controlCategory": "Implementation",
885
- "readableControlId": "DASF 15"
 
 
886
  },
887
  {
888
  "controlId": "DASF-16",
889
  "title": "Secure model features",
890
- "risks": [
891
- {
892
- "component": "Data Prep",
893
- "identifier": "2.1",
894
- "riskId": "data-prep-2.1",
895
- "summary": "Data Prep Risk 2.1"
896
- },
897
- {
898
- "component": "Data Prep",
899
- "identifier": "2.2",
900
- "riskId": "data-prep-2.2",
901
- "summary": "Data Prep Risk 2.2"
902
- },
903
- {
904
- "component": "Datasets",
905
- "identifier": "3.1",
906
- "riskId": "datasets-3.1",
907
- "summary": "Datasets Risk 3.1"
908
- },
909
- {
910
- "component": "Governance",
911
- "identifier": "4.1",
912
- "riskId": "governance-4.1",
913
- "summary": "Governance Risk 4.1"
914
- },
915
- {
916
- "component": "Algorithms",
917
- "identifier": "5.2",
918
- "riskId": "algorithms-5.2",
919
- "summary": "Algorithms Risk 5.2"
920
- },
921
- {
922
- "component": "Model Serving \u2014 Inference Requests",
923
- "identifier": "9.10",
924
- "riskId": "model-serving-inference-requests-9.10",
925
- "summary": "Model Serving \u2014 Inference Requests Risk 9.10"
926
- }
927
- ],
928
  "description": "Databricks Feature Store is a centralized repository that enables data scientists to find and share features and also ensures that the same code used to compute the feature values is used for model training and inference. Unity Catalog\u2019s capabilities, such as security, lineage, table history, tagging and cross-workspace access, are automatically available to the feature table to reduce the risk of malicious actors manipulating the features that feed into ML training.",
929
  "controlCategory": "Implementation",
930
- "readableControlId": "DASF 16"
 
 
931
  },
932
  {
933
  "controlId": "DASF-17",
934
  "title": "Track and reproduce the training data used for ML model training",
935
- "risks": [
936
- {
937
- "component": "Data Prep",
938
- "identifier": "2.4",
939
- "riskId": "data-prep-2.4",
940
- "summary": "Data Prep Risk 2.4"
941
- },
942
- {
943
- "component": "Datasets",
944
- "identifier": "3.1",
945
- "riskId": "datasets-3.1",
946
- "summary": "Datasets Risk 3.1"
947
- },
948
- {
949
- "component": "Governance",
950
- "identifier": "4.1",
951
- "riskId": "governance-4.1",
952
- "summary": "Governance Risk 4.1"
953
- },
954
- {
955
- "component": "Algorithms",
956
- "identifier": "5.2",
957
- "riskId": "algorithms-5.2",
958
- "summary": "Algorithms Risk 5.2"
959
- }
960
- ],
961
  "description": "MLflow with Delta Lake tracks the training data used for ML model training. It also enables the identification of specific ML models and runs derived from particular datasets for regulatory and auditable attribution.",
962
  "controlCategory": "Configuration",
963
- "readableControlId": "DASF 17"
 
 
964
  },
965
  {
966
  "controlId": "DASF-18",
967
  "title": "Govern model assets",
968
- "risks": [
969
- {
970
- "component": "Governance",
971
- "identifier": "4.1",
972
- "riskId": "governance-4.1",
973
- "summary": "Governance Risk 4.1"
974
- }
975
- ],
976
  "description": "With Unity Catalog, organizations can implement a unified governance framework for their structured and unstructured data, machine learning models, notebooks, features, functions, and files, enhancing security and compliance across clouds and platforms.",
977
  "controlCategory": "Configuration",
978
- "readableControlId": "DASF 18"
 
 
979
  },
980
  {
981
  "controlId": "DASF-19",
982
  "title": "Manage end-to-end machine learning lifecycle",
983
- "risks": [
984
- {
985
- "component": "Governance",
986
- "identifier": "4.2",
987
- "riskId": "governance-4.2",
988
- "summary": "Governance Risk 4.2"
989
- },
990
- {
991
- "component": "Model",
992
- "identifier": "7.1",
993
- "riskId": "model-7.1",
994
- "summary": "Model Risk 7.1"
995
- }
996
- ],
997
  "description": "Databricks includes a managed version of MLflow featuring enterprise security controls and high availability. It supports functionalities like experiments, run management and notebook revision capture. MLflow on Databricks allows tracking and measuring machine learning model training runs, logging model training artifacts and securing machine learning projects.",
998
  "controlCategory": "Implementation",
999
- "readableControlId": "DASF 19"
 
 
1000
  },
1001
  {
1002
  "controlId": "DASF-20",
1003
  "title": "Track ML training runs",
1004
- "risks": [
1005
- {
1006
- "component": "Algorithms",
1007
- "identifier": "5.1",
1008
- "riskId": "algorithms-5.1",
1009
- "summary": "Algorithms Risk 5.1"
1010
- },
1011
- {
1012
- "component": "Algorithms",
1013
- "identifier": "5.3",
1014
- "riskId": "algorithms-5.3",
1015
- "summary": "Algorithms Risk 5.3"
1016
- }
1017
- ],
1018
  "description": "MLflow tracking facilitates the automated recording and retrieval of experiment details, including algorithms, code, datasets, parameters, configurations, signatures and artifacts.",
1019
  "controlCategory": "Implementation",
1020
- "readableControlId": "DASF 20"
 
 
1021
  },
1022
  {
1023
  "controlId": "DASF-21",
1024
  "title": "Monitor data and AI system from a single pane of glass",
1025
- "risks": [
1026
- {
1027
- "component": "Raw Data",
1028
- "identifier": "1.3",
1029
- "riskId": "raw-data-1.3",
1030
- "summary": "Raw Data Risk 1.3"
1031
- },
1032
- {
1033
- "component": "Governance",
1034
- "identifier": "4.2",
1035
- "riskId": "governance-4.2",
1036
- "summary": "Governance Risk 4.2"
1037
- },
1038
- {
1039
- "component": "Algorithms",
1040
- "identifier": "5.2",
1041
- "riskId": "algorithms-5.2",
1042
- "summary": "Algorithms Risk 5.2"
1043
- }
1044
- ],
1045
  "description": "Databricks Lakehouse Monitoring offers a single pane of glass to centrally track tables\u2019 data quality and statistical properties and automatically classifies data. It can also track the performance of machine learning models and model serving endpoints by monitoring inference tables containing model inputs and predictions through a single pane of glass.",
1046
  "controlCategory": "Implementation",
1047
- "readableControlId": "DASF 21"
 
 
1048
  },
1049
  {
1050
  "controlId": "DASF-22",
1051
  "title": "Build models with all representative, accurate and relevant data sources",
1052
- "risks": [
1053
- {
1054
- "component": "Evaluation",
1055
- "identifier": "6.2",
1056
- "riskId": "evaluation-6.2",
1057
- "summary": "Evaluation Risk 6.2"
1058
- },
1059
- {
1060
- "component": "Model",
1061
- "identifier": "7.3",
1062
- "riskId": "model-7.3",
1063
- "summary": "Model Risk 7.3"
1064
- }
1065
- ],
1066
  "description": "Harnessing internal data and intellectual property to customize large AI models can offer a significant competitive edge. However, this process can be complex, involving coordination across various parts of the organization. The Data Intelligence Platform addresses this challenge by integrating data across traditionally isolated departments and systems. This integration facilitates a more cohesive data and AI strategy, enabling the effective training, testing and evaluation of models using a comprehensive dataset. Use caution when preparing data for traditional models and GenAI training to ensure that you are not unintentionally including data that causes legal conflicts, such as copyright violations, privacy violations or HIPAA violations.",
1067
  "controlCategory": "Implementation",
1068
- "readableControlId": "DASF 22"
 
 
1069
  },
1070
  {
1071
  "controlId": "DASF-23",
1072
  "title": "Register, version, approve, promote and deploy model",
1073
- "risks": [
1074
- {
1075
- "component": "Model",
1076
- "identifier": "7.1",
1077
- "riskId": "model-7.1",
1078
- "summary": "Model Risk 7.1"
1079
- }
1080
- ],
1081
  "description": "MLflow Model Registry supports managing the machine learning model lifecycle with capabilities for lineage tracking, versioning, staging and model serving.",
1082
  "controlCategory": "Implementation",
1083
- "readableControlId": "DASF 23"
 
 
1084
  },
1085
  {
1086
  "controlId": "DASF-24",
1087
  "title": "Control access to models and model assets",
1088
- "risks": [
1089
- {
1090
- "component": "Model",
1091
- "identifier": "7.2",
1092
- "riskId": "model-7.2",
1093
- "summary": "Model Risk 7.2"
1094
- },
1095
- {
1096
- "component": "Model Management",
1097
- "identifier": "8.2",
1098
- "riskId": "model-management-8.2",
1099
- "summary": "Model Management Risk 8.2"
1100
- },
1101
- {
1102
- "component": "Model Management",
1103
- "identifier": "8.3",
1104
- "riskId": "model-management-8.3",
1105
- "summary": "Model Management Risk 8.3"
1106
- },
1107
- {
1108
- "component": "Model Management",
1109
- "identifier": "8.4",
1110
- "riskId": "model-management-8.4",
1111
- "summary": "Model Management Risk 8.4"
1112
- },
1113
- {
1114
- "component": "Model Serving \u2014 Inference Requests",
1115
- "identifier": "9.1",
1116
- "riskId": "model-serving-inference-requests-9.1",
1117
- "summary": "Model Serving \u2014 Inference Requests Risk 9.1"
1118
- },
1119
- {
1120
- "component": "Model Serving \u2014 Inference Requests",
1121
- "identifier": "9.2",
1122
- "riskId": "model-serving-inference-requests-9.2",
1123
- "summary": "Model Serving \u2014 Inference Requests Risk 9.2"
1124
- },
1125
- {
1126
- "component": "Model Serving \u2014 Inference Requests",
1127
- "identifier": "9.5",
1128
- "riskId": "model-serving-inference-requests-9.5",
1129
- "summary": "Model Serving \u2014 Inference Requests Risk 9.5"
1130
- },
1131
- {
1132
- "component": "Model Serving \u2014 Inference Requests",
1133
- "identifier": "9.6",
1134
- "riskId": "model-serving-inference-requests-9.6",
1135
- "summary": "Model Serving \u2014 Inference Requests Risk 9.6"
1136
- },
1137
- {
1138
- "component": "Model Serving \u2014 Inference Requests",
1139
- "identifier": "9.7",
1140
- "riskId": "model-serving-inference-requests-9.7",
1141
- "summary": "Model Serving \u2014 Inference Requests Risk 9.7"
1142
- },
1143
- {
1144
- "component": "Model Serving \u2014 Inference Response",
1145
- "identifier": "10.3",
1146
- "riskId": "model-serving-inference-response-10.3",
1147
- "summary": "Model Serving \u2014 Inference Response Risk 10.3"
1148
- },
1149
- {
1150
- "component": "Model Serving \u2014 Inference Response",
1151
- "identifier": "10.4",
1152
- "riskId": "model-serving-inference-response-10.4",
1153
- "summary": "Model Serving \u2014 Inference Response Risk 10.4"
1154
- }
1155
- ],
1156
  "description": "Organizations commonly encounter challenges in tracking and controlling access to ML models, auditing their usage, and understanding their evolution in complex machine learning workflows. Unity Catalog integrates with the MLflow Model Registry across model lifecycles. This approach simplifies the management and oversight of ML models, proving particularly valuable in environments with multiple teams and diverse projects.",
1157
  "controlCategory": "Implementation",
1158
- "readableControlId": "DASF 24"
 
 
1159
  },
1160
  {
1161
  "controlId": "DASF-25",
1162
  "title": "Use retrieval augmented generation (RAG) with large language models (LLMs)",
1163
- "risks": [
1164
- {
1165
- "component": "Evaluation",
1166
- "identifier": "6.2",
1167
- "riskId": "evaluation-6.2",
1168
- "summary": "Evaluation Risk 6.2"
1169
- },
1170
- {
1171
- "component": "Model Serving \u2014 Inference Requests",
1172
- "identifier": "9.8",
1173
- "riskId": "model-serving-inference-requests-9.8",
1174
- "summary": "Model Serving \u2014 Inference Requests Risk 9.8"
1175
- }
1176
- ],
1177
  "description": "Generating relevant and accurate responses in large language models (LLMs) while avoiding hallucinations requires grounding them in domain-specific knowledge. Retrieval augmented generation (RAG) addresses this by breaking down extensive datasets into manageable segments (\u201cchunks\u201d) that are \u201cvector embedded.\u201d These vector embeddings are mathematical representations that help the model understand and quantify different data segments. As a result, LLMs produce responses that are contextually relevant and deeply rooted in the specific domain knowledge.",
1178
  "controlCategory": "Implementation",
1179
- "readableControlId": "DASF 25"
 
 
1180
  },
1181
  {
1182
  "controlId": "DASF-26",
1183
  "title": "Fine-tune large language models (LLMs)",
1184
- "risks": [
1185
- {
1186
- "component": "Model Serving \u2014 Inference Requests",
1187
- "identifier": "9.8",
1188
- "riskId": "model-serving-inference-requests-9.8",
1189
- "summary": "Model Serving \u2014 Inference Requests Risk 9.8"
1190
- }
1191
- ],
1192
  "description": "Data is your competitive advantage. Use it to customize large AI models to beat your competition. Produce new model variants with tailored LLM response style and structure via fine-tuning. Fine-tune your own LLM with open models to own your IP.",
1193
  "controlCategory": "Implementation",
1194
- "readableControlId": "DASF 26"
 
 
1195
  },
1196
  {
1197
  "controlId": "DASF-27",
1198
  "title": "Pretrain a large language model (LLM)",
1199
- "risks": [
1200
- {
1201
- "component": "Raw Data",
1202
- "identifier": "1.8",
1203
- "riskId": "raw-data-1.8",
1204
- "summary": "Raw Data Risk 1.8"
1205
- },
1206
- {
1207
- "component": "Model",
1208
- "identifier": "7.3",
1209
- "riskId": "model-7.3",
1210
- "summary": "Model Risk 7.3"
1211
- },
1212
- {
1213
- "component": "Model Serving \u2014 Inference Requests",
1214
- "identifier": "9.8",
1215
- "riskId": "model-serving-inference-requests-9.8",
1216
- "summary": "Model Serving \u2014 Inference Requests Risk 9.8"
1217
- }
1218
- ],
1219
  "description": "Data is your competitive advantage. Use it to customize large AI models to beat your competition by pretraining models with your data, imbuing the model with domain-specific knowledge, vocabulary and semantics. Pretrain your own LLM with MosaicML to own your IP.",
1220
  "controlCategory": "Implementation",
1221
- "readableControlId": "DASF 27"
 
 
1222
  },
1223
  {
1224
  "controlId": "DASF-28",
1225
  "title": "Create model aliases, tags and annotations",
1226
- "risks": [
1227
- {
1228
- "component": "Model Management",
1229
- "identifier": "8.1",
1230
- "riskId": "model-management-8.1",
1231
- "summary": "Model Management Risk 8.1"
1232
- },
1233
- {
1234
- "component": "Model Management",
1235
- "identifier": "8.3",
1236
- "riskId": "model-management-8.3",
1237
- "summary": "Model Management Risk 8.3"
1238
- },
1239
- {
1240
- "component": "Model Serving \u2014 Inference Requests",
1241
- "identifier": "9.5",
1242
- "riskId": "model-serving-inference-requests-9.5",
1243
- "summary": "Model Serving \u2014 Inference Requests Risk 9.5"
1244
- },
1245
- {
1246
- "component": "Model Serving \u2014 Inference Requests",
1247
- "identifier": "9.6",
1248
- "riskId": "model-serving-inference-requests-9.6",
1249
- "summary": "Model Serving \u2014 Inference Requests Risk 9.6"
1250
- },
1251
- {
1252
- "component": "Model Serving \u2014 Inference Response",
1253
- "identifier": "10.3",
1254
- "riskId": "model-serving-inference-response-10.3",
1255
- "summary": "Model Serving \u2014 Inference Response Risk 10.3"
1256
- },
1257
- {
1258
- "component": "Model Serving \u2014 Inference Response",
1259
- "identifier": "10.4",
1260
- "riskId": "model-serving-inference-response-10.4",
1261
- "summary": "Model Serving \u2014 Inference Response Risk 10.4"
1262
- }
1263
- ],
1264
  "description": "Model aliases in machine learning workflows allow you to assign a mutable, named reference to a specific version of a registered model. This functionality is beneficial for tracking and managing different stages of a model\u2019s lifecycle, indicating the current deployment status of any given model version.",
1265
  "controlCategory": "Implementation",
1266
- "readableControlId": "DASF 28"
 
 
1267
  },
1268
  {
1269
  "controlId": "DASF-29",
1270
  "title": "Build MLOps workflows",
1271
- "risks": [
1272
- {
1273
- "component": "Raw Data",
1274
- "identifier": "1.8",
1275
- "riskId": "raw-data-1.8",
1276
- "summary": "Raw Data Risk 1.8"
1277
- },
1278
- {
1279
- "component": "Model Management",
1280
- "identifier": "8.1",
1281
- "riskId": "model-management-8.1",
1282
- "summary": "Model Management Risk 8.1"
1283
- },
1284
- {
1285
- "component": "Model Management",
1286
- "identifier": "8.3",
1287
- "riskId": "model-management-8.3",
1288
- "summary": "Model Management Risk 8.3"
1289
- }
1290
- ],
1291
  "description": "The lakehouse forms the foundation of a data-centric AI platform. Key to this is the ability to manage both data and AI assets from a unified governance solution on the lakehouse. Databricks Unity Catalog enables this by providing centralized access control, auditing, approvals, model workflow, lineage, and data discovery capabilities across Databricks workspaces. These benefits are now extended to MLflow Models with the introduction of Models in Unity Catalog. Through providing a hosted version of the MLflow Model Registry in Unity Catalog, the full lifecycle of an ML model can be managed while leveraging Unity Catalog\u2019s capability to share assets across Databricks workspaces and trace lineage across both data and models.",
1292
  "controlCategory": "Implementation",
1293
- "readableControlId": "DASF 29"
 
 
1294
  },
1295
  {
1296
  "controlId": "DASF-30",
1297
  "title": "Encrypt models",
1298
- "risks": [
1299
- {
1300
- "component": "Model Management",
1301
- "identifier": "8.2",
1302
- "riskId": "model-management-8.2",
1303
- "summary": "Model Management Risk 8.2"
1304
- },
1305
- {
1306
- "component": "Model Management",
1307
- "identifier": "8.4",
1308
- "riskId": "model-management-8.4",
1309
- "summary": "Model Management Risk 8.4"
1310
- },
1311
- {
1312
- "component": "Model Serving \u2014 Inference Requests",
1313
- "identifier": "9.1",
1314
- "riskId": "model-serving-inference-requests-9.1",
1315
- "summary": "Model Serving \u2014 Inference Requests Risk 9.1"
1316
- },
1317
- {
1318
- "component": "Model Serving \u2014 Inference Requests",
1319
- "identifier": "9.2",
1320
- "riskId": "model-serving-inference-requests-9.2",
1321
- "summary": "Model Serving \u2014 Inference Requests Risk 9.2"
1322
- },
1323
- {
1324
- "component": "Model Serving \u2014 Inference Requests",
1325
- "identifier": "9.5",
1326
- "riskId": "model-serving-inference-requests-9.5",
1327
- "summary": "Model Serving \u2014 Inference Requests Risk 9.5"
1328
- },
1329
- {
1330
- "component": "Model Serving \u2014 Inference Requests",
1331
- "identifier": "9.6",
1332
- "riskId": "model-serving-inference-requests-9.6",
1333
- "summary": "Model Serving \u2014 Inference Requests Risk 9.6"
1334
- },
1335
- {
1336
- "component": "Model Serving \u2014 Inference Requests",
1337
- "identifier": "9.7",
1338
- "riskId": "model-serving-inference-requests-9.7",
1339
- "summary": "Model Serving \u2014 Inference Requests Risk 9.7"
1340
- },
1341
- {
1342
- "component": "Model Serving \u2014 Inference Response",
1343
- "identifier": "10.2",
1344
- "riskId": "model-serving-inference-response-10.2",
1345
- "summary": "Model Serving \u2014 Inference Response Risk 10.2"
1346
- },
1347
- {
1348
- "component": "Model Serving \u2014 Inference Response",
1349
- "identifier": "10.3",
1350
- "riskId": "model-serving-inference-response-10.3",
1351
- "summary": "Model Serving \u2014 Inference Response Risk 10.3"
1352
- },
1353
- {
1354
- "component": "Model Serving \u2014 Inference Response",
1355
- "identifier": "10.4",
1356
- "riskId": "model-serving-inference-response-10.4",
1357
- "summary": "Model Serving \u2014 Inference Response Risk 10.4"
1358
- },
1359
- {
1360
- "component": "Model Serving \u2014 Inference Response",
1361
- "identifier": "10.5",
1362
- "riskId": "model-serving-inference-response-10.5",
1363
- "summary": "Model Serving \u2014 Inference Response Risk 10.5"
1364
- }
1365
- ],
1366
  "description": "Databricks Platform secures model assets and their transfer with TLS 1.2+ in-transit encryption. Additionally, Unity Catalog\u2019s managed model registry provides encryption at rest for persisting models, further enhancing security.",
1367
  "controlCategory": "Out-of-the-box",
1368
- "readableControlId": "DASF 30"
 
 
1369
  },
1370
  {
1371
  "controlId": "DASF-31",
1372
  "title": "Secure model serving endpoints",
1373
- "risks": [
1374
- {
1375
- "component": "Model Management",
1376
- "identifier": "8.2",
1377
- "riskId": "model-management-8.2",
1378
- "summary": "Model Management Risk 8.2"
1379
- },
1380
- {
1381
- "component": "Model Management",
1382
- "identifier": "8.4",
1383
- "riskId": "model-management-8.4",
1384
- "summary": "Model Management Risk 8.4"
1385
- },
1386
- {
1387
- "component": "Model Serving \u2014 Inference Requests",
1388
- "identifier": "9.1",
1389
- "riskId": "model-serving-inference-requests-9.1",
1390
- "summary": "Model Serving \u2014 Inference Requests Risk 9.1"
1391
- },
1392
- {
1393
- "component": "Model Serving \u2014 Inference Requests",
1394
- "identifier": "9.2",
1395
- "riskId": "model-serving-inference-requests-9.2",
1396
- "summary": "Model Serving \u2014 Inference Requests Risk 9.2"
1397
- },
1398
- {
1399
- "component": "Model Serving \u2014 Inference Requests",
1400
- "identifier": "9.5",
1401
- "riskId": "model-serving-inference-requests-9.5",
1402
- "summary": "Model Serving \u2014 Inference Requests Risk 9.5"
1403
- },
1404
- {
1405
- "component": "Model Serving \u2014 Inference Requests",
1406
- "identifier": "9.6",
1407
- "riskId": "model-serving-inference-requests-9.6",
1408
- "summary": "Model Serving \u2014 Inference Requests Risk 9.6"
1409
- },
1410
- {
1411
- "component": "Model Serving \u2014 Inference Requests",
1412
- "identifier": "9.7",
1413
- "riskId": "model-serving-inference-requests-9.7",
1414
- "summary": "Model Serving \u2014 Inference Requests Risk 9.7"
1415
- },
1416
- {
1417
- "component": "Model Serving \u2014 Inference Response",
1418
- "identifier": "10.2",
1419
- "riskId": "model-serving-inference-response-10.2",
1420
- "summary": "Model Serving \u2014 Inference Response Risk 10.2"
1421
- },
1422
- {
1423
- "component": "Model Serving \u2014 Inference Response",
1424
- "identifier": "10.3",
1425
- "riskId": "model-serving-inference-response-10.3",
1426
- "summary": "Model Serving \u2014 Inference Response Risk 10.3"
1427
- },
1428
- {
1429
- "component": "Model Serving \u2014 Inference Response",
1430
- "identifier": "10.4",
1431
- "riskId": "model-serving-inference-response-10.4",
1432
- "summary": "Model Serving \u2014 Inference Response Risk 10.4"
1433
- },
1434
- {
1435
- "component": "Model Serving \u2014 Inference Response",
1436
- "identifier": "10.5",
1437
- "riskId": "model-serving-inference-response-10.5",
1438
- "summary": "Model Serving \u2014 Inference Response Risk 10.5"
1439
- }
1440
- ],
1441
  "description": "Model serving involves risks of unauthorized data access and model tampering, which can compromise the integrity and reliability of machine learning deployments. Mosaic AI Model Serving addresses these concerns by providing secure-by-default REST API endpoints for MLflow machine learning models, featuring autoscaling, high availability and low latency.",
1442
  "controlCategory": "Out-of-the-box",
1443
- "readableControlId": "DASF 31"
 
 
1444
  },
1445
  {
1446
  "controlId": "DASF-32",
1447
  "title": "Streamline the usage and management of various large language model (LLM) providers",
1448
- "risks": [
1449
- {
1450
- "component": "Model Management",
1451
- "identifier": "8.2",
1452
- "riskId": "model-management-8.2",
1453
- "summary": "Model Management Risk 8.2"
1454
- },
1455
- {
1456
- "component": "Model Management",
1457
- "identifier": "8.4",
1458
- "riskId": "model-management-8.4",
1459
- "summary": "Model Management Risk 8.4"
1460
- },
1461
- {
1462
- "component": "Model Serving \u2014 Inference Requests",
1463
- "identifier": "9.1",
1464
- "riskId": "model-serving-inference-requests-9.1",
1465
- "summary": "Model Serving \u2014 Inference Requests Risk 9.1"
1466
- },
1467
- {
1468
- "component": "Model Serving \u2014 Inference Requests",
1469
- "identifier": "9.2",
1470
- "riskId": "model-serving-inference-requests-9.2",
1471
- "summary": "Model Serving \u2014 Inference Requests Risk 9.2"
1472
- },
1473
- {
1474
- "component": "Model Serving \u2014 Inference Requests",
1475
- "identifier": "9.5",
1476
- "riskId": "model-serving-inference-requests-9.5",
1477
- "summary": "Model Serving \u2014 Inference Requests Risk 9.5"
1478
- },
1479
- {
1480
- "component": "Model Serving \u2014 Inference Requests",
1481
- "identifier": "9.6",
1482
- "riskId": "model-serving-inference-requests-9.6",
1483
- "summary": "Model Serving \u2014 Inference Requests Risk 9.6"
1484
- },
1485
- {
1486
- "component": "Model Serving \u2014 Inference Requests",
1487
- "identifier": "9.7",
1488
- "riskId": "model-serving-inference-requests-9.7",
1489
- "summary": "Model Serving \u2014 Inference Requests Risk 9.7"
1490
- },
1491
- {
1492
- "component": "Model Serving \u2014 Inference Response",
1493
- "identifier": "10.2",
1494
- "riskId": "model-serving-inference-response-10.2",
1495
- "summary": "Model Serving \u2014 Inference Response Risk 10.2"
1496
- },
1497
- {
1498
- "component": "Model Serving \u2014 Inference Response",
1499
- "identifier": "10.3",
1500
- "riskId": "model-serving-inference-response-10.3",
1501
- "summary": "Model Serving \u2014 Inference Response Risk 10.3"
1502
- },
1503
- {
1504
- "component": "Model Serving \u2014 Inference Response",
1505
- "identifier": "10.4",
1506
- "riskId": "model-serving-inference-response-10.4",
1507
- "summary": "Model Serving \u2014 Inference Response Risk 10.4"
1508
- },
1509
- {
1510
- "component": "Model Serving \u2014 Inference Response",
1511
- "identifier": "10.5",
1512
- "riskId": "model-serving-inference-response-10.5",
1513
- "summary": "Model Serving \u2014 Inference Response Risk 10.5"
1514
- }
1515
- ],
1516
  "description": "External models are third-party models hosted outside of Databricks. Supported by Model Serving AI Gateway, Databricks external models via the AI Gateway allow you to streamline the usage and management of various large language model (LLM) providers, such as OpenAI and Anthropic, within an organization. You can also use Mosaic AI Model Serving as a provider to serve predictive ML models, which offers rate limits for those endpoints. As part of this support, Model Serving offers a high-level interface that simplifies the interaction with these services by providing a unified endpoint to handle specific LLM-related requests. In addition, Databricks support for external models provides centralized credential management. By storing API keys in one secure location, organizations can enhance their security posture by minimizing the exposure of sensitive API keys throughout the system. It also helps to prevent exposing these keys within code or requiring end users to manage keys safely.",
1517
  "controlCategory": "Out-of-the-box",
1518
- "readableControlId": "DASF 32"
 
 
1519
  },
1520
  {
1521
  "controlId": "DASF-33",
1522
  "title": "Manage credentials securely",
1523
- "risks": [
1524
- {
1525
- "component": "Model",
1526
- "identifier": "7.2",
1527
- "riskId": "model-7.2",
1528
- "summary": "Model Risk 7.2"
1529
- },
1530
- {
1531
- "component": "Model Management",
1532
- "identifier": "8.2",
1533
- "riskId": "model-management-8.2",
1534
- "summary": "Model Management Risk 8.2"
1535
- }
1536
- ],
1537
  "description": "Databricks Secrets stores your credentials and references them in notebooks, scripts, configuration properties and jobs. Integrating with heterogeneous systems requires managing a potentially large set of credentials and safely distributing them across an organization. Instead of directly entering your credentials into a notebook, use Databricks Secrets to store your credentials and reference them in notebooks and jobs to prevent credential leaks through models. Databricks secret management allows users to use and share credentials within Databricks securely. You can also choose to use a third-party secret management service, such as AWS Secrets Manager or a third-party secret manager.",
1538
  "controlCategory": "Implementation",
1539
- "readableControlId": "DASF 33"
 
 
1540
  },
1541
  {
1542
  "controlId": "DASF-34",
1543
  "title": "Run models in multiple layers of isolation",
1544
- "risks": [
1545
- {
1546
- "component": "Model",
1547
- "identifier": "7.1",
1548
- "riskId": "model-7.1",
1549
- "summary": "Model Risk 7.1"
1550
- },
1551
- {
1552
- "component": "Model Serving \u2014 Inference Requests",
1553
- "identifier": "9.3",
1554
- "riskId": "model-serving-inference-requests-9.3",
1555
- "summary": "Model Serving \u2014 Inference Requests Risk 9.3"
1556
- }
1557
- ],
1558
  "description": "Databricks Serverless Compute provides a secure-by-design model serving service featuring defense-in-depth controls like dedicated VMs, network segmentation, and encryption for data in transit and at rest. It adheres to the principle of least privilege for enhanced security.",
1559
  "controlCategory": "Out-of-the-box",
1560
- "readableControlId": "DASF 34"
 
 
1561
  },
1562
  {
1563
  "controlId": "DASF-35",
1564
  "title": "Track model performance",
1565
- "risks": [
1566
- {
1567
- "component": "Model Serving \u2014 Inference Response",
1568
- "identifier": "10.1",
1569
- "riskId": "model-serving-inference-response-10.1",
1570
- "summary": "Model Serving \u2014 Inference Response Risk 10.1"
1571
- }
1572
- ],
1573
  "description": "Databricks Lakehouse Monitoring provides performance metrics and data quality statistics across all account tables. It tracks the performance of machine learning models and model serving endpoints by observing inference tables with model inputs and predictions.",
1574
  "controlCategory": "Implementation",
1575
- "readableControlId": "DASF 35"
 
 
1576
  },
1577
  {
1578
  "controlId": "DASF-36",
1579
  "title": "Set up monitoring alerts",
1580
- "risks": [
1581
- {
1582
- "component": "Raw Data",
1583
- "identifier": "1.3",
1584
- "riskId": "raw-data-1.3",
1585
- "summary": "Raw Data Risk 1.3"
1586
- },
1587
- {
1588
- "component": "Model Serving \u2014 Inference Response",
1589
- "identifier": "10.1",
1590
- "riskId": "model-serving-inference-response-10.1",
1591
- "summary": "Model Serving \u2014 Inference Response Risk 10.1"
1592
- }
1593
- ],
1594
  "description": "Databricks SQL alerts can monitor the metrics table for security-based conditions, ensuring data integrity and timely response to potential issues: Statistic range Alert: Triggers when a specific statistic, such as the fraction of missing values, exceeds a predetermined threshold. Data distribution shift alert: Activates upon shifts in data distribution, as indicated by the drift metrics table. Baseline divergence alert: Alerts if data significantly diverges from a baseline, suggesting potential needs for data analysis or model retraining, particularly in InferenceLog analysis.",
1595
  "controlCategory": "Implementation",
1596
- "readableControlId": "DASF 36"
 
 
1597
  },
1598
  {
1599
  "controlId": "DASF-37",
1600
  "title": "Set up inference tables for monitoring and debugging models",
1601
- "risks": [
1602
- {
1603
- "component": "Model Serving \u2014 Inference Requests",
1604
- "identifier": "9.1",
1605
- "riskId": "model-serving-inference-requests-9.1",
1606
- "summary": "Model Serving \u2014 Inference Requests Risk 9.1"
1607
- },
1608
- {
1609
- "component": "Model Serving \u2014 Inference Requests",
1610
- "identifier": "9.2",
1611
- "riskId": "model-serving-inference-requests-9.2",
1612
- "summary": "Model Serving \u2014 Inference Requests Risk 9.2"
1613
- },
1614
- {
1615
- "component": "Model Serving \u2014 Inference Requests",
1616
- "identifier": "9.3",
1617
- "riskId": "model-serving-inference-requests-9.3",
1618
- "summary": "Model Serving \u2014 Inference Requests Risk 9.3"
1619
- },
1620
- {
1621
- "component": "Model Serving \u2014 Inference Requests",
1622
- "identifier": "9.4",
1623
- "riskId": "model-serving-inference-requests-9.4",
1624
- "summary": "Model Serving \u2014 Inference Requests Risk 9.4"
1625
- },
1626
- {
1627
- "component": "Model Serving \u2014 Inference Requests",
1628
- "identifier": "9.5",
1629
- "riskId": "model-serving-inference-requests-9.5",
1630
- "summary": "Model Serving \u2014 Inference Requests Risk 9.5"
1631
- },
1632
- {
1633
- "component": "Model Serving \u2014 Inference Requests",
1634
- "identifier": "9.6",
1635
- "riskId": "model-serving-inference-requests-9.6",
1636
- "summary": "Model Serving \u2014 Inference Requests Risk 9.6"
1637
- },
1638
- {
1639
- "component": "Model Serving \u2014 Inference Requests",
1640
- "identifier": "9.7",
1641
- "riskId": "model-serving-inference-requests-9.7",
1642
- "summary": "Model Serving \u2014 Inference Requests Risk 9.7"
1643
- },
1644
- {
1645
- "component": "Model Serving \u2014 Inference Response",
1646
- "identifier": "10.1",
1647
- "riskId": "model-serving-inference-response-10.1",
1648
- "summary": "Model Serving \u2014 Inference Response Risk 10.1"
1649
- },
1650
- {
1651
- "component": "Model Serving \u2014 Inference Response",
1652
- "identifier": "10.3",
1653
- "riskId": "model-serving-inference-response-10.3",
1654
- "summary": "Model Serving \u2014 Inference Response Risk 10.3"
1655
- },
1656
- {
1657
- "component": "Model Serving \u2014 Inference Response",
1658
- "identifier": "10.4",
1659
- "riskId": "model-serving-inference-response-10.4",
1660
- "summary": "Model Serving \u2014 Inference Response Risk 10.4"
1661
- }
1662
- ],
1663
  "description": "Databricks inference tables automatically record incoming requests and outgoing responses to model serving endpoints, storing them as a Unity Catalog Delta table. This table can be used to monitor, debug and enhance ML models. By coupling inference tables with Lakehouse Monitoring, customers can also set up automated monitoring jobs and alerts on inference tables, such as monitoring text quality or toxicity from endpoints serving LLMs, etc. Critical applications of an inference table include: Retraining dataset creation, Quality monitoring, Diagnostics and debugging, and Mislabeled data identification.",
1664
  "controlCategory": "Implementation",
1665
- "readableControlId": "DASF 37"
 
 
1666
  },
1667
  {
1668
  "controlId": "DASF-38",
1669
  "title": "Platform security \u2014 vulnerability management",
1670
- "risks": [
1671
- {
1672
- "component": "Platform",
1673
- "identifier": "12.1",
1674
- "riskId": "platform-12.1",
1675
- "summary": "Platform Risk 12.1"
1676
- }
1677
- ],
1678
  "description": "Managing vulnerabilities entails addressing complex security challenges with performance impact considerations. Databricks\u2019 formal and documented vulnerability management program, overseen by the chief security officer (CSO), is approved by management, undergoes annual reviews and is communicated to all relevant internal parties. The policy requires that vulnerabilities be addressed based on severity: critical vulnerabilities within 14 days, high severity within 30 days and medium severity within 60 days.",
1679
  "controlCategory": "Out-of-the-box",
1680
- "readableControlId": "DASF 38"
 
 
1681
  },
1682
  {
1683
  "controlId": "DASF-39",
1684
  "title": "Platform security \u2014 Incident Response Team",
1685
- "risks": [
1686
- {
1687
- "component": "Platform",
1688
- "identifier": "12.2",
1689
- "riskId": "platform-12.2",
1690
- "summary": "Platform Risk 12.2"
1691
- },
1692
- {
1693
- "component": "Platform",
1694
- "identifier": "12.3",
1695
- "riskId": "platform-12.3",
1696
- "summary": "Platform Risk 12.3"
1697
- }
1698
- ],
1699
  "description": "Databricks has established a formal incident response plan that outlines key elements such as roles, responsibilities, escalation paths and external communication protocols. The platform handles over 9TB of audit logs daily, aiding customer and Databricks security investigations. A dedicated security incident response team operates an internal Databricks instance, consolidating essential log sources for thorough security analysis. Databricks ensures continual operational readiness with a 24/7/365 on-call rotation. Additionally, a proactive hunting program and a specialized detection team support the incident response program.",
1700
  "controlCategory": "Out-of-the-box",
1701
- "readableControlId": "DASF 39"
 
 
1702
  },
1703
  {
1704
  "controlId": "DASF-40",
1705
  "title": "Platform security \u2014 internal access",
1706
- "risks": [
1707
- {
1708
- "component": "Platform",
1709
- "identifier": "12.4",
1710
- "riskId": "platform-12.4",
1711
- "summary": "Platform Risk 12.4"
1712
- }
1713
- ],
1714
  "description": "Databricks personnel, by default, do not have access to customer workspaces or production environments. Access may be temporarily requested by Databricks staff for purposes such as investigating outages, security events or supporting deployments. Customers have the option to disable this access. Additionally, staff activity within these environments is recorded in customer audit logs. Accessing these areas requires multi-factor authentication, and employees must connect to the Databricks VPN.",
1715
  "controlCategory": "Out-of-the-box",
1716
- "readableControlId": "DASF 40"
 
 
1717
  },
1718
  {
1719
  "controlId": "DASF-41",
1720
  "title": "Platform security \u2014 secure SDLC",
1721
- "risks": [
1722
- {
1723
- "component": "Platform",
1724
- "identifier": "12.5",
1725
- "riskId": "platform-12.5",
1726
- "summary": "Platform Risk 12.5"
1727
- }
1728
- ],
1729
  "description": "Databricks engineering integrates security throughout the software development lifecycle (SDLC), encompassing both technical and process-level controls under the oversight of our chief security officer (CSO). Activities within our SDLC include: Code peer reviews, Static and dynamic scans for code and containers, including dependencies, Feature-level security reviews, Annual software engineering security training, and Cross-organizational collaborations between security, product management, product security and security champions. These development controls are augmented by internal and external penetration testing programs, with findings tracked for resolution and reported to our executive team. Databricks' processes undergo an independent annual review, the results of which are published in our SOC 2 Type 2 report, available upon request.",
1730
  "controlCategory": "Out-of-the-box",
1731
- "readableControlId": "DASF 41"
 
 
1732
  },
1733
  {
1734
  "controlId": "DASF-42",
1735
  "title": "Employ data-centric MLOps and LLMOps",
1736
- "risks": [
1737
- {
1738
- "component": "Data Prep",
1739
- "identifier": "2.2",
1740
- "riskId": "data-prep-2.2",
1741
- "summary": "Data Prep Risk 2.2"
1742
- },
1743
- {
1744
- "component": "Data Prep",
1745
- "identifier": "2.3",
1746
- "riskId": "data-prep-2.3",
1747
- "summary": "Data Prep Risk 2.3"
1748
- },
1749
- {
1750
- "component": "Data Prep",
1751
- "identifier": "2.4",
1752
- "riskId": "data-prep-2.4",
1753
- "summary": "Data Prep Risk 2.4"
1754
- },
1755
- {
1756
- "component": "Governance",
1757
- "identifier": "4.2",
1758
- "riskId": "governance-4.2",
1759
- "summary": "Governance Risk 4.2"
1760
- },
1761
- {
1762
- "component": "Algorithms",
1763
- "identifier": "5.1",
1764
- "riskId": "algorithms-5.1",
1765
- "summary": "Algorithms Risk 5.1"
1766
- },
1767
- {
1768
- "component": "Algorithms",
1769
- "identifier": "5.3",
1770
- "riskId": "algorithms-5.3",
1771
- "summary": "Algorithms Risk 5.3"
1772
- },
1773
- {
1774
- "component": "Evaluation",
1775
- "identifier": "6.1",
1776
- "riskId": "evaluation-6.1",
1777
- "summary": "Evaluation Risk 6.1"
1778
- },
1779
- {
1780
- "component": "Model",
1781
- "identifier": "7.1",
1782
- "riskId": "model-7.1",
1783
- "summary": "Model Risk 7.1"
1784
- },
1785
- {
1786
- "component": "Model",
1787
- "identifier": "7.2",
1788
- "riskId": "model-7.2",
1789
- "summary": "Model Risk 7.2"
1790
- },
1791
- {
1792
- "component": "Model",
1793
- "identifier": "7.3",
1794
- "riskId": "model-7.3",
1795
- "summary": "Model Risk 7.3"
1796
- },
1797
- {
1798
- "component": "Model Management",
1799
- "identifier": "8.3",
1800
- "riskId": "model-management-8.3",
1801
- "summary": "Model Management Risk 8.3"
1802
- },
1803
- {
1804
- "component": "Operations",
1805
- "identifier": "11.1",
1806
- "riskId": "operations-11.1",
1807
- "summary": "Operations Risk 11.1"
1808
- }
1809
- ],
1810
  "description": "MLOps enhances efficiency, scalability, security and risk reduction in machine learning projects. Databricks integrates with MLflow, focusing on enterprise reliability, security and scalability for managing the machine learning lifecycle. The latest update to MLflow introduces new LLMOps features for better management and deployment of large language models (LLMs). This includes integrations with Hugging Face Transformers, OpenAI and the external models in Mosaic AI Model Serving. MLflow also integrates with LangChain and a prompt engineering UI, facilitating generative AI application development for use cases such as chatbots, document summarization and text classification.",
1811
  "controlCategory": "Implementation",
1812
- "readableControlId": "DASF 42"
 
 
1813
  },
1814
  {
1815
  "controlId": "DASF-43",
1816
  "title": "Use access control lists",
1817
- "risks": [
1818
- {
1819
- "component": "Data Prep",
1820
- "identifier": "2.3",
1821
- "riskId": "data-prep-2.3",
1822
- "summary": "Data Prep Risk 2.3"
1823
- },
1824
- {
1825
- "component": "Algorithms",
1826
- "identifier": "5.3",
1827
- "riskId": "algorithms-5.3",
1828
- "summary": "Algorithms Risk 5.3"
1829
- },
1830
- {
1831
- "component": "Model",
1832
- "identifier": "7.1",
1833
- "riskId": "model-7.1",
1834
- "summary": "Model Risk 7.1"
1835
- }
1836
- ],
1837
  "description": "Databricks access control lists (ACLs) enable you to configure permissions for accessing and interacting with workspace objects, including folders, notebooks, experiments, models, clusters, pools, jobs, Delta Live Tables pipelines, alerts, dashboards, queries and SQL warehouses.",
1838
  "controlCategory": "Implementation",
1839
- "readableControlId": "DASF 43"
 
 
1840
  },
1841
  {
1842
  "controlId": "DASF-44",
1843
  "title": "Triggering actions in response to a specific event",
1844
- "risks": [
1845
- {
1846
- "component": "Evaluation",
1847
- "identifier": "6.1",
1848
- "riskId": "evaluation-6.1",
1849
- "summary": "Evaluation Risk 6.1"
1850
- },
1851
- {
1852
- "component": "Operations",
1853
- "identifier": "11.1",
1854
- "riskId": "operations-11.1",
1855
- "summary": "Operations Risk 11.1"
1856
- }
1857
- ],
1858
  "description": "Webhooks in the MLflow Model Registry enable you to automate machine learning workflows by triggering actions in response to specific events. These webhooks facilitate seamless integrations, allowing for the automatic execution of various processes. For example, webhooks are used for: CI workflow trigger (Validate your model automatically when creating a new version), Team notifications (Send alerts through a messaging app when a model stage transition request is received), Model fairness evaluation (Invoke a workflow to assess model fairness and bias upon a production transition request), and Automated deployment (Trigger a deployment pipeline when a new tag is created on a model).",
1859
  "controlCategory": "Implementation",
1860
- "readableControlId": "DASF 44"
 
 
1861
  },
1862
  {
1863
  "controlId": "DASF-45",
1864
  "title": "Evaluate models",
1865
- "risks": [
1866
- {
1867
- "component": "Evaluation",
1868
- "identifier": "6.1",
1869
- "riskId": "evaluation-6.1",
1870
- "summary": "Evaluation Risk 6.1"
1871
- },
1872
- {
1873
- "component": "Evaluation",
1874
- "identifier": "6.2",
1875
- "riskId": "evaluation-6.2",
1876
- "summary": "Evaluation Risk 6.2"
1877
- },
1878
- {
1879
- "component": "Model",
1880
- "identifier": "7.3",
1881
- "riskId": "model-7.3",
1882
- "summary": "Model Risk 7.3"
1883
- },
1884
- {
1885
- "component": "Model Serving \u2014 Inference Requests",
1886
- "identifier": "9.5",
1887
- "riskId": "model-serving-inference-requests-9.5",
1888
- "summary": "Model Serving \u2014 Inference Requests Risk 9.5"
1889
- },
1890
- {
1891
- "component": "Model Serving \u2014 Inference Requests",
1892
- "identifier": "9.6",
1893
- "riskId": "model-serving-inference-requests-9.6",
1894
- "summary": "Model Serving \u2014 Inference Requests Risk 9.6"
1895
- },
1896
- {
1897
- "component": "Model Serving \u2014 Inference Response",
1898
- "identifier": "10.4",
1899
- "riskId": "model-serving-inference-response-10.4",
1900
- "summary": "Model Serving \u2014 Inference Response Risk 10.4"
1901
- },
1902
- {
1903
- "component": "Operations",
1904
- "identifier": "11.1",
1905
- "riskId": "operations-11.1",
1906
- "summary": "Operations Risk 11.1"
1907
- }
1908
- ],
1909
  "description": "Model evaluation is a critical component of the machine learning lifecycle. It provides data scientists with the tools to measure, interpret and explain the performance of their models. MLflow plays a critical role in accelerating model development by offering insights into the reasons behind a model's performance and guiding improvements and iterations. MLflow offers many industry-standard native evaluation metrics for classical machine learning algorithms and LLMs, and also facilitates the use of custom evaluation metrics.",
1910
  "controlCategory": "Implementation",
1911
- "readableControlId": "DASF 45"
 
 
1912
  },
1913
  {
1914
  "controlId": "DASF-46",
1915
  "title": "Store and retrieve embeddings securely",
1916
- "risks": [
1917
- {
1918
- "component": "Model Serving \u2014 Inference Requests",
1919
- "identifier": "9.1",
1920
- "riskId": "model-serving-inference-requests-9.1",
1921
- "summary": "Model Serving \u2014 Inference Requests Risk 9.1"
1922
- },
1923
- {
1924
- "component": "Model Serving \u2014 Inference Requests",
1925
- "identifier": "9.2",
1926
- "riskId": "model-serving-inference-requests-9.2",
1927
- "summary": "Model Serving \u2014 Inference Requests Risk 9.2"
1928
- },
1929
- {
1930
- "component": "Model Serving \u2014 Inference Requests",
1931
- "identifier": "9.5",
1932
- "riskId": "model-serving-inference-requests-9.5",
1933
- "summary": "Model Serving \u2014 Inference Requests Risk 9.5"
1934
- },
1935
- {
1936
- "component": "Model Serving \u2014 Inference Requests",
1937
- "identifier": "9.6",
1938
- "riskId": "model-serving-inference-requests-9.6",
1939
- "summary": "Model Serving \u2014 Inference Requests Risk 9.6"
1940
- },
1941
- {
1942
- "component": "Model Serving \u2014 Inference Requests",
1943
- "identifier": "9.7",
1944
- "riskId": "model-serving-inference-requests-9.7",
1945
- "summary": "Model Serving \u2014 Inference Requests Risk 9.7"
1946
- },
1947
- {
1948
- "component": "Model Serving \u2014 Inference Requests",
1949
- "identifier": "9.8",
1950
- "riskId": "model-serving-inference-requests-9.8",
1951
- "summary": "Model Serving \u2014 Inference Requests Risk 9.8"
1952
- },
1953
- {
1954
- "component": "Model Serving \u2014 Inference Requests",
1955
- "identifier": "9.9",
1956
- "riskId": "model-serving-inference-requests-9.9",
1957
- "summary": "Model Serving \u2014 Inference Requests Risk 9.9"
1958
- },
1959
- {
1960
- "component": "Model Serving \u2014 Inference Requests",
1961
- "identifier": "9.10",
1962
- "riskId": "model-serving-inference-requests-9.10",
1963
- "summary": "Model Serving \u2014 Inference Requests Risk 9.10"
1964
- },
1965
- {
1966
- "component": "Model Serving \u2014 Inference Response",
1967
- "identifier": "10.4",
1968
- "riskId": "model-serving-inference-response-10.4",
1969
- "summary": "Model Serving \u2014 Inference Response Risk 10.4"
1970
- }
1971
- ],
1972
  "description": "Mosaic AI Vector Search is a vector database that is built into the Databricks Data Intelligence Platform and integrated with its governance and productivity tools. A vector database is a database that is optimized to store and retrieve embeddings. Embeddings are mathematical representations of the semantic content of data, typically text or image data. Embeddings are usually generated by feature extraction models for text, image, audio or multi-modal data, and are a key component of many GenAI applications that depend on finding documents or images that are similar to each other. Examples are RAG systems, recommender systems, and image and video recognition. Databricks implements the following security controls to protect your data: Every customer request to Vector Search is logically isolated, authenticated and authorized, and Mosaic AI Vector Search encrypts all data at rest (AES-256) and in transit (TLS 1.2+).",
1973
  "controlCategory": "Implementation",
1974
- "readableControlId": "DASF 46"
 
 
1975
  },
1976
  {
1977
  "controlId": "DASF-47",
1978
  "title": "Compare LLM outputs on set prompts",
1979
- "risks": [
1980
- {
1981
- "component": "Evaluation",
1982
- "identifier": "6.2",
1983
- "riskId": "evaluation-6.2",
1984
- "summary": "Evaluation Risk 6.2"
1985
- }
1986
- ],
1987
  "description": "New, no-code visual tools allow users to compare models' output based on set prompts, which are automatically tracked within MLflow. With integration into Mosaic AI Model Serving, customers can deploy the best model to production. The AI Playground is a chat-like environment where you can test, prompt and compare LLMs.",
1988
  "controlCategory": "Implementation",
1989
- "readableControlId": "DASF 47"
 
 
1990
  },
1991
  {
1992
  "controlId": "DASF-48",
1993
  "title": "Use hardened Runtime for Machine Learning",
1994
- "risks": [
1995
- {
1996
- "component": "Model",
1997
- "identifier": "7.3",
1998
- "riskId": "model-7.3",
1999
- "summary": "Model Risk 7.3"
2000
- }
2001
- ],
2002
  "description": "Databricks Runtime for Machine Learning (Databricks Runtime ML) now automates cluster creation with versatile infrastructure, encompassing pre-built ML/DL libraries and custom library integration. Enhanced scalability and cost management tools optimize performance and expenditure. The refined user interface caters to various expertise levels, while new collaboration features support team-based projects. Comprehensive training resources and detailed documentation complement these improvements.",
2003
  "controlCategory": "Out-of-the-box",
2004
- "readableControlId": "DASF 48"
 
 
2005
  },
2006
  {
2007
  "controlId": "DASF-49",
2008
  "title": "Automate LLM evaluation",
2009
- "risks": [
2010
- {
2011
- "component": "Evaluation",
2012
- "identifier": "6.1",
2013
- "riskId": "evaluation-6.1",
2014
- "summary": "Evaluation Risk 6.1"
2015
- },
2016
- {
2017
- "component": "Model Serving \u2014 Inference Requests",
2018
- "identifier": "9.8",
2019
- "riskId": "model-serving-inference-requests-9.8",
2020
- "summary": "Model Serving \u2014 Inference Requests Risk 9.8"
2021
- }
2022
- ],
2023
  "description": "The \"LLM-as-a-judge\" feature in MLflow 2.8 automates LLM evaluation, offering a practical alternative to human judgment. It's designed to be efficient and cost-effective, maintaining consistency with human scores. This tool supports various metrics, including standard and customizable GenAI metrics, and allows users to select an LLM as a judge and define specific grading criteria.",
2024
  "controlCategory": "Implementation",
2025
- "readableControlId": "DASF 49"
 
 
2026
  },
2027
  {
2028
  "controlId": "DASF-50",
2029
  "title": "Platform compliance",
2030
- "risks": [
2031
- {
2032
- "component": "Platform",
2033
- "identifier": "12.6",
2034
- "riskId": "platform-12.6",
2035
- "summary": "Platform Risk 12.6"
2036
- }
2037
- ],
2038
  "description": "Develop your solutions on a platform created using some of the most rigorous security and compliance standards in the world. Get independent audit reports verifying that Databricks adheres to security controls for ISO 27001, ISO 27018, SOC 1, SOC 2, FedRAMP, HITRUST, IRAP, etc.",
2039
  "controlCategory": "Out-of-the-box",
2040
- "readableControlId": "DASF 50"
 
 
2041
  },
2042
  {
2043
  "controlId": "DASF-51",
2044
  "title": "Share data and AI assets securely",
2045
- "risks": [
2046
- {
2047
- "component": "Raw Data",
2048
- "identifier": "1.1",
2049
- "riskId": "raw-data-1.1",
2050
- "summary": "Raw Data Risk 1.1"
2051
- },
2052
- {
2053
- "component": "Raw Data",
2054
- "identifier": "1.6",
2055
- "riskId": "raw-data-1.6",
2056
- "summary": "Raw Data Risk 1.6"
2057
- },
2058
- {
2059
- "component": "Raw Data",
2060
- "identifier": "1.7",
2061
- "riskId": "raw-data-1.7",
2062
- "summary": "Raw Data Risk 1.7"
2063
- },
2064
- {
2065
- "component": "Datasets",
2066
- "identifier": "3.1",
2067
- "riskId": "datasets-3.1",
2068
- "summary": "Datasets Risk 3.1"
2069
- },
2070
- {
2071
- "component": "Model Management",
2072
- "identifier": "8.1",
2073
- "riskId": "model-management-8.1",
2074
- "summary": "Model Management Risk 8.1"
2075
- },
2076
- {
2077
- "component": "Model Management",
2078
- "identifier": "8.2",
2079
- "riskId": "model-management-8.2",
2080
- "summary": "Model Management Risk 8.2"
2081
- }
2082
- ],
2083
  "description": "Databricks Delta Sharing lets you share data and AI assets securely in Databricks with users outside your organization, whether those users use Databricks or not.",
2084
  "controlCategory": "Out-of-the-box",
2085
- "readableControlId": "DASF 51"
 
 
2086
  },
2087
  {
2088
  "controlId": "DASF-52",
2089
  "title": "Source code control",
2090
- "risks": [
2091
- {
2092
- "component": "Data Prep",
2093
- "identifier": "2.1",
2094
- "riskId": "data-prep-2.1",
2095
- "summary": "Data Prep Risk 2.1"
2096
- },
2097
- {
2098
- "component": "Model",
2099
- "identifier": "7.4",
2100
- "riskId": "model-7.4",
2101
- "summary": "Model Risk 7.4"
2102
- }
2103
- ],
2104
  "description": "Databricks' Git Repository integration supports effective management of code and third-party libraries, enhancing customer control over their development environment.",
2105
  "controlCategory": "Out-of-the-box",
2106
- "readableControlId": "DASF 52"
 
 
2107
  },
2108
  {
2109
  "controlId": "DASF-53",
2110
  "title": "Third-party library control",
2111
- "risks": [
2112
- {
2113
- "component": "Algorithms",
2114
- "identifier": "5.4",
2115
- "riskId": "algorithms-5.4",
2116
- "summary": "Algorithms Risk 5.4"
2117
- },
2118
- {
2119
- "component": "Model",
2120
- "identifier": "7.3",
2121
- "riskId": "model-7.3",
2122
- "summary": "Model Risk 7.3"
2123
- },
2124
- {
2125
- "component": "Model",
2126
- "identifier": "7.4",
2127
- "riskId": "model-7.4",
2128
- "summary": "Model Risk 7.4"
2129
- }
2130
- ],
2131
  "description": "Databricks' library management system allows administrators to manage the installation and usage of third-party libraries effectively. This feature enhances the security and efficiency of systems, pipelines and data by giving administrators precise control over their development environment.",
2132
  "controlCategory": "Out-of-the-box",
2133
- "readableControlId": "DASF 53"
 
 
2134
  }
2135
  ]
 
2
  {
3
  "controlId": "DASF-1",
4
  "title": "SSO with IdP and MFA",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  "description": "Implementing single sign-on with an identity provider\u2019s (IdP) multi-factor authentication is critical for secure authentication. It adds an extra layer of security, ensuring that only authorized users access the Databricks Platform.",
6
  "controlCategory": "Configuration",
7
+ "readableControlId": "DASF 1",
8
+ "severity": "medium",
9
+ "automationPlatforms": []
10
  },
11
  {
12
  "controlId": "DASF-2",
13
  "title": "Sync users and groups",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  "description": "Synchronizing users and groups from your identity provider (IdP) with Databricks using the SCIM standard facilitates consistent and automated user provisioning for enhancing security.",
15
  "controlCategory": "Configuration",
16
+ "readableControlId": "DASF 2",
17
+ "severity": "medium",
18
+ "automationPlatforms": []
19
  },
20
  {
21
  "controlId": "DASF-3",
22
  "title": "Restrict access using IP access lists",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  "description": "Configure IP access lists to restrict authentication to Databricks from specific IP ranges, such as VPNs or office networks, and strengthen network security by preventing unauthorized access from untrusted locations.",
24
  "controlCategory": "Configuration",
25
+ "readableControlId": "DASF 3",
26
+ "severity": "medium",
27
+ "automationPlatforms": []
28
  },
29
  {
30
  "controlId": "DASF-4",
31
  "title": "Restrict access using private link",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  "description": "Use AWS PrivateLink, Azure Private Link or GCP Private Service Connect to create a private network route between the customer and the Databricks control plane or the control plane and the customer\u2019s compute plane environments to enhance data security by avoiding public internet exposure.",
33
  "controlCategory": "Configuration",
34
+ "readableControlId": "DASF 4",
35
+ "severity": "medium",
36
+ "automationPlatforms": []
37
  },
38
  {
39
  "controlId": "DASF-5",
40
  "title": "Control access to data and other objects",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  "description": "Implementing Unity Catalog for unified permissions management and assets simplifies access control and enhances security.",
42
  "controlCategory": "Implementation",
43
+ "readableControlId": "DASF 5",
44
+ "severity": "medium",
45
+ "automationPlatforms": []
46
  },
47
  {
48
  "controlId": "DASF-6",
49
  "title": "Classify data",
 
 
 
 
 
 
 
 
50
  "description": "Tags are attributes containing keys and optional values that you can apply to different securable objects in Unity Catalog. Organizing securable objects with tags in Unity Catalog aids in efficient data management, data discovery and classification, essential for handling large datasets.",
51
  "controlCategory": "Implementation",
52
+ "readableControlId": "DASF 6",
53
+ "severity": "medium",
54
+ "automationPlatforms": []
55
  },
56
  {
57
  "controlId": "DASF-7",
58
  "title": "Enforce Data Quality Checks on Batch and Streaming Datasets",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  "description": "Databricks Delta Live Tables (DLT) simplifies ETL development with declarative pipelines that integrate quality control checks and performance monitoring.",
60
  "controlCategory": "Implementation",
61
+ "readableControlId": "DASF 7",
62
+ "severity": "medium",
63
+ "automationPlatforms": []
64
  },
65
  {
66
  "controlId": "DASF-8",
67
  "title": "Encrypt data at rest",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  "description": "Databricks supports customer-managed encryption keys to strengthen data at rest protection and greater access control.",
69
  "controlCategory": "Configuration",
70
+ "readableControlId": "DASF 8",
71
+ "severity": "medium",
72
+ "automationPlatforms": []
73
  },
74
  {
75
  "controlId": "DASF-9",
76
  "title": "Encrypt data in transit",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
  "description": "Databricks supports TLS 1.2+ encryption to protect customer data during transit. This applies to data transfer between the customer and the Databricks control plane and within the compute plane. Customers can also secure inter-cluster communications within the compute plane per their security requirements.",
78
  "controlCategory": "Out-of-the-box",
79
+ "readableControlId": "DASF 9",
80
+ "severity": "medium",
81
+ "automationPlatforms": []
82
  },
83
  {
84
  "controlId": "DASF-10",
85
  "title": "Version data",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
  "description": "Store data in a lakehouse architecture using Delta tables. Delta tables can be versioned to revert any user\u2019s or malicious actor\u2019s poisoning of data. Data can be stored in a lakehouse architecture in the customer\u2019s cloud account. Both raw data and feature tables are stored as Delta tables with access controls to determine who can read and modify them. Data lineage with UC helps track and audit changes and the origin of ML data sources. Each operation that modifies a Delta Lake table creates a new table version. User actions are tracked and audited, and lineage of transformations is available all in the same platform. You can use history information to audit operations, roll back a table or query a table at a specific point in time using time travel.",
87
  "controlCategory": "Implementation",
88
+ "readableControlId": "DASF 10",
89
+ "severity": "medium",
90
+ "automationPlatforms": []
91
  },
92
  {
93
  "controlId": "DASF-11",
94
  "title": "Capture and view data lineage",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
  "description": "Unity Catalog tracks and visualizes real-time data lineage across all languages to the column level, providing a traceable history of an object from notebooks, workflows, models and dashboards. This enhances transparency and compliance, with accessibility provided through the Catalog Explorer.",
96
  "controlCategory": "Out-of-the-box",
97
+ "readableControlId": "DASF 11",
98
+ "severity": "medium",
99
+ "automationPlatforms": []
100
  },
101
  {
102
  "controlId": "DASF-12",
103
  "title": "Delete records from datasets",
 
 
 
 
 
 
 
 
104
  "description": "Data governance in Delta Lake, the lakehouse storage layer, utilizes its atomicity, consistency, isolation, durability (ACID) properties for effective data management. This includes the capability to remove data based on specific predicates from a Delta Table, including the complete removal of data\u2019s history, supporting compliance with regulations like GDPR and CCPA.",
105
  "controlCategory": "Implementation",
106
+ "readableControlId": "DASF 12",
107
+ "severity": "medium",
108
+ "automationPlatforms": []
109
  },
110
  {
111
  "controlId": "DASF-13",
112
  "title": "Use near real-time data",
 
 
 
 
 
 
 
 
113
  "description": "Use Databricks for near real-time data ingestion, processing, machine learning, and AI for streaming data.",
114
  "controlCategory": "Implementation",
115
+ "readableControlId": "DASF 13",
116
+ "severity": "medium",
117
+ "automationPlatforms": []
118
  },
119
  {
120
  "controlId": "DASF-14",
121
  "title": "Audit actions performed on datasets",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  "description": "Databricks auditing, enhanced by Unity Catalog\u2019s events, delivers fine-grained visibility into data access and user activities. This is vital for robust data governance and security, especially in regulated industries. It enables organizations to proactively identify and manage overentitled users, enhancing data security and ensuring compliance.",
123
  "controlCategory": "Implementation",
124
+ "readableControlId": "DASF 14",
125
+ "severity": "medium",
126
+ "automationPlatforms": []
127
  },
128
  {
129
  "controlId": "DASF-15",
130
  "title": "Explore datasets and identify problems",
 
 
 
 
 
 
 
 
131
  "description": "Iteratively explore, share and prep data for the machine learning lifecycle by creating reproducible, editable and shareable datasets, tables and visualizations. Within Databricks this EDA process can be accelerated with Mosaic AI AutoML. AutoML not only generates baseline models given a dataset, but also provides the underlying model training code in the form of a Python notebook. Notably for EDA, AutoML calculates summary statistics on the provided dataset, creating a notebook for the data scientist to review and adapt.",
132
  "controlCategory": "Implementation",
133
+ "readableControlId": "DASF 15",
134
+ "severity": "medium",
135
+ "automationPlatforms": []
136
  },
137
  {
138
  "controlId": "DASF-16",
139
  "title": "Secure model features",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
140
  "description": "Databricks Feature Store is a centralized repository that enables data scientists to find and share features and also ensures that the same code used to compute the feature values is used for model training and inference. Unity Catalog\u2019s capabilities, such as security, lineage, table history, tagging and cross-workspace access, are automatically available to the feature table to reduce the risk of malicious actors manipulating the features that feed into ML training.",
141
  "controlCategory": "Implementation",
142
+ "readableControlId": "DASF 16",
143
+ "severity": "medium",
144
+ "automationPlatforms": []
145
  },
146
  {
147
  "controlId": "DASF-17",
148
  "title": "Track and reproduce the training data used for ML model training",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  "description": "MLflow with Delta Lake tracks the training data used for ML model training. It also enables the identification of specific ML models and runs derived from particular datasets for regulatory and auditable attribution.",
150
  "controlCategory": "Configuration",
151
+ "readableControlId": "DASF 17",
152
+ "severity": "medium",
153
+ "automationPlatforms": []
154
  },
155
  {
156
  "controlId": "DASF-18",
157
  "title": "Govern model assets",
 
 
 
 
 
 
 
 
158
  "description": "With Unity Catalog, organizations can implement a unified governance framework for their structured and unstructured data, machine learning models, notebooks, features, functions, and files, enhancing security and compliance across clouds and platforms.",
159
  "controlCategory": "Configuration",
160
+ "readableControlId": "DASF 18",
161
+ "severity": "medium",
162
+ "automationPlatforms": []
163
  },
164
  {
165
  "controlId": "DASF-19",
166
  "title": "Manage end-to-end machine learning lifecycle",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
167
  "description": "Databricks includes a managed version of MLflow featuring enterprise security controls and high availability. It supports functionalities like experiments, run management and notebook revision capture. MLflow on Databricks allows tracking and measuring machine learning model training runs, logging model training artifacts and securing machine learning projects.",
168
  "controlCategory": "Implementation",
169
+ "readableControlId": "DASF 19",
170
+ "severity": "medium",
171
+ "automationPlatforms": []
172
  },
173
  {
174
  "controlId": "DASF-20",
175
  "title": "Track ML training runs",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
176
  "description": "MLflow tracking facilitates the automated recording and retrieval of experiment details, including algorithms, code, datasets, parameters, configurations, signatures and artifacts.",
177
  "controlCategory": "Implementation",
178
+ "readableControlId": "DASF 20",
179
+ "severity": "medium",
180
+ "automationPlatforms": []
181
  },
182
  {
183
  "controlId": "DASF-21",
184
  "title": "Monitor data and AI system from a single pane of glass",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
  "description": "Databricks Lakehouse Monitoring offers a single pane of glass to centrally track tables\u2019 data quality and statistical properties and automatically classifies data. It can also track the performance of machine learning models and model serving endpoints by monitoring inference tables containing model inputs and predictions through a single pane of glass.",
186
  "controlCategory": "Implementation",
187
+ "readableControlId": "DASF 21",
188
+ "severity": "medium",
189
+ "automationPlatforms": []
190
  },
191
  {
192
  "controlId": "DASF-22",
193
  "title": "Build models with all representative, accurate and relevant data sources",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
  "description": "Harnessing internal data and intellectual property to customize large AI models can offer a significant competitive edge. However, this process can be complex, involving coordination across various parts of the organization. The Data Intelligence Platform addresses this challenge by integrating data across traditionally isolated departments and systems. This integration facilitates a more cohesive data and AI strategy, enabling the effective training, testing and evaluation of models using a comprehensive dataset. Use caution when preparing data for traditional models and GenAI training to ensure that you are not unintentionally including data that causes legal conflicts, such as copyright violations, privacy violations or HIPAA violations.",
195
  "controlCategory": "Implementation",
196
+ "readableControlId": "DASF 22",
197
+ "severity": "medium",
198
+ "automationPlatforms": []
199
  },
200
  {
201
  "controlId": "DASF-23",
202
  "title": "Register, version, approve, promote and deploy model",
 
 
 
 
 
 
 
 
203
  "description": "MLflow Model Registry supports managing the machine learning model lifecycle with capabilities for lineage tracking, versioning, staging and model serving.",
204
  "controlCategory": "Implementation",
205
+ "readableControlId": "DASF 23",
206
+ "severity": "medium",
207
+ "automationPlatforms": []
208
  },
209
  {
210
  "controlId": "DASF-24",
211
  "title": "Control access to models and model assets",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
212
  "description": "Organizations commonly encounter challenges in tracking and controlling access to ML models, auditing their usage, and understanding their evolution in complex machine learning workflows. Unity Catalog integrates with the MLflow Model Registry across model lifecycles. This approach simplifies the management and oversight of ML models, proving particularly valuable in environments with multiple teams and diverse projects.",
213
  "controlCategory": "Implementation",
214
+ "readableControlId": "DASF 24",
215
+ "severity": "medium",
216
+ "automationPlatforms": []
217
  },
218
  {
219
  "controlId": "DASF-25",
220
  "title": "Use retrieval augmented generation (RAG) with large language models (LLMs)",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
221
  "description": "Generating relevant and accurate responses in large language models (LLMs) while avoiding hallucinations requires grounding them in domain-specific knowledge. Retrieval augmented generation (RAG) addresses this by breaking down extensive datasets into manageable segments (\u201cchunks\u201d) that are \u201cvector embedded.\u201d These vector embeddings are mathematical representations that help the model understand and quantify different data segments. As a result, LLMs produce responses that are contextually relevant and deeply rooted in the specific domain knowledge.",
222
  "controlCategory": "Implementation",
223
+ "readableControlId": "DASF 25",
224
+ "severity": "medium",
225
+ "automationPlatforms": []
226
  },
227
  {
228
  "controlId": "DASF-26",
229
  "title": "Fine-tune large language models (LLMs)",
 
 
 
 
 
 
 
 
230
  "description": "Data is your competitive advantage. Use it to customize large AI models to beat your competition. Produce new model variants with tailored LLM response style and structure via fine-tuning. Fine-tune your own LLM with open models to own your IP.",
231
  "controlCategory": "Implementation",
232
+ "readableControlId": "DASF 26",
233
+ "severity": "medium",
234
+ "automationPlatforms": []
235
  },
236
  {
237
  "controlId": "DASF-27",
238
  "title": "Pretrain a large language model (LLM)",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
239
  "description": "Data is your competitive advantage. Use it to customize large AI models to beat your competition by pretraining models with your data, imbuing the model with domain-specific knowledge, vocabulary and semantics. Pretrain your own LLM with MosaicML to own your IP.",
240
  "controlCategory": "Implementation",
241
+ "readableControlId": "DASF 27",
242
+ "severity": "medium",
243
+ "automationPlatforms": []
244
  },
245
  {
246
  "controlId": "DASF-28",
247
  "title": "Create model aliases, tags and annotations",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
  "description": "Model aliases in machine learning workflows allow you to assign a mutable, named reference to a specific version of a registered model. This functionality is beneficial for tracking and managing different stages of a model\u2019s lifecycle, indicating the current deployment status of any given model version.",
249
  "controlCategory": "Implementation",
250
+ "readableControlId": "DASF 28",
251
+ "severity": "medium",
252
+ "automationPlatforms": []
253
  },
254
  {
255
  "controlId": "DASF-29",
256
  "title": "Build MLOps workflows",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
257
  "description": "The lakehouse forms the foundation of a data-centric AI platform. Key to this is the ability to manage both data and AI assets from a unified governance solution on the lakehouse. Databricks Unity Catalog enables this by providing centralized access control, auditing, approvals, model workflow, lineage, and data discovery capabilities across Databricks workspaces. These benefits are now extended to MLflow Models with the introduction of Models in Unity Catalog. Through providing a hosted version of the MLflow Model Registry in Unity Catalog, the full lifecycle of an ML model can be managed while leveraging Unity Catalog\u2019s capability to share assets across Databricks workspaces and trace lineage across both data and models.",
258
  "controlCategory": "Implementation",
259
+ "readableControlId": "DASF 29",
260
+ "severity": "medium",
261
+ "automationPlatforms": []
262
  },
263
  {
264
  "controlId": "DASF-30",
265
  "title": "Encrypt models",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
266
  "description": "Databricks Platform secures model assets and their transfer with TLS 1.2+ in-transit encryption. Additionally, Unity Catalog\u2019s managed model registry provides encryption at rest for persisting models, further enhancing security.",
267
  "controlCategory": "Out-of-the-box",
268
+ "readableControlId": "DASF 30",
269
+ "severity": "medium",
270
+ "automationPlatforms": []
271
  },
272
  {
273
  "controlId": "DASF-31",
274
  "title": "Secure model serving endpoints",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
275
  "description": "Model serving involves risks of unauthorized data access and model tampering, which can compromise the integrity and reliability of machine learning deployments. Mosaic AI Model Serving addresses these concerns by providing secure-by-default REST API endpoints for MLflow machine learning models, featuring autoscaling, high availability and low latency.",
276
  "controlCategory": "Out-of-the-box",
277
+ "readableControlId": "DASF 31",
278
+ "severity": "medium",
279
+ "automationPlatforms": []
280
  },
281
  {
282
  "controlId": "DASF-32",
283
  "title": "Streamline the usage and management of various large language model (LLM) providers",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
284
  "description": "External models are third-party models hosted outside of Databricks. Supported by Model Serving AI Gateway, Databricks external models via the AI Gateway allow you to streamline the usage and management of various large language model (LLM) providers, such as OpenAI and Anthropic, within an organization. You can also use Mosaic AI Model Serving as a provider to serve predictive ML models, which offers rate limits for those endpoints. As part of this support, Model Serving offers a high-level interface that simplifies the interaction with these services by providing a unified endpoint to handle specific LLM-related requests. In addition, Databricks support for external models provides centralized credential management. By storing API keys in one secure location, organizations can enhance their security posture by minimizing the exposure of sensitive API keys throughout the system. It also helps to prevent exposing these keys within code or requiring end users to manage keys safely.",
285
  "controlCategory": "Out-of-the-box",
286
+ "readableControlId": "DASF 32",
287
+ "severity": "medium",
288
+ "automationPlatforms": []
289
  },
290
  {
291
  "controlId": "DASF-33",
292
  "title": "Manage credentials securely",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
293
  "description": "Databricks Secrets stores your credentials and references them in notebooks, scripts, configuration properties and jobs. Integrating with heterogeneous systems requires managing a potentially large set of credentials and safely distributing them across an organization. Instead of directly entering your credentials into a notebook, use Databricks Secrets to store your credentials and reference them in notebooks and jobs to prevent credential leaks through models. Databricks secret management allows users to use and share credentials within Databricks securely. You can also choose to use a third-party secret management service, such as AWS Secrets Manager or a third-party secret manager.",
294
  "controlCategory": "Implementation",
295
+ "readableControlId": "DASF 33",
296
+ "severity": "medium",
297
+ "automationPlatforms": []
298
  },
299
  {
300
  "controlId": "DASF-34",
301
  "title": "Run models in multiple layers of isolation",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
302
  "description": "Databricks Serverless Compute provides a secure-by-design model serving service featuring defense-in-depth controls like dedicated VMs, network segmentation, and encryption for data in transit and at rest. It adheres to the principle of least privilege for enhanced security.",
303
  "controlCategory": "Out-of-the-box",
304
+ "readableControlId": "DASF 34",
305
+ "severity": "medium",
306
+ "automationPlatforms": []
307
  },
308
  {
309
  "controlId": "DASF-35",
310
  "title": "Track model performance",
 
 
 
 
 
 
 
 
311
  "description": "Databricks Lakehouse Monitoring provides performance metrics and data quality statistics across all account tables. It tracks the performance of machine learning models and model serving endpoints by observing inference tables with model inputs and predictions.",
312
  "controlCategory": "Implementation",
313
+ "readableControlId": "DASF 35",
314
+ "severity": "medium",
315
+ "automationPlatforms": []
316
  },
317
  {
318
  "controlId": "DASF-36",
319
  "title": "Set up monitoring alerts",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
320
  "description": "Databricks SQL alerts can monitor the metrics table for security-based conditions, ensuring data integrity and timely response to potential issues: Statistic range Alert: Triggers when a specific statistic, such as the fraction of missing values, exceeds a predetermined threshold. Data distribution shift alert: Activates upon shifts in data distribution, as indicated by the drift metrics table. Baseline divergence alert: Alerts if data significantly diverges from a baseline, suggesting potential needs for data analysis or model retraining, particularly in InferenceLog analysis.",
321
  "controlCategory": "Implementation",
322
+ "readableControlId": "DASF 36",
323
+ "severity": "medium",
324
+ "automationPlatforms": []
325
  },
326
  {
327
  "controlId": "DASF-37",
328
  "title": "Set up inference tables for monitoring and debugging models",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
329
  "description": "Databricks inference tables automatically record incoming requests and outgoing responses to model serving endpoints, storing them as a Unity Catalog Delta table. This table can be used to monitor, debug and enhance ML models. By coupling inference tables with Lakehouse Monitoring, customers can also set up automated monitoring jobs and alerts on inference tables, such as monitoring text quality or toxicity from endpoints serving LLMs, etc. Critical applications of an inference table include: Retraining dataset creation, Quality monitoring, Diagnostics and debugging, and Mislabeled data identification.",
330
  "controlCategory": "Implementation",
331
+ "readableControlId": "DASF 37",
332
+ "severity": "medium",
333
+ "automationPlatforms": []
334
  },
335
  {
336
  "controlId": "DASF-38",
337
  "title": "Platform security \u2014 vulnerability management",
 
 
 
 
 
 
 
 
338
  "description": "Managing vulnerabilities entails addressing complex security challenges with performance impact considerations. Databricks\u2019 formal and documented vulnerability management program, overseen by the chief security officer (CSO), is approved by management, undergoes annual reviews and is communicated to all relevant internal parties. The policy requires that vulnerabilities be addressed based on severity: critical vulnerabilities within 14 days, high severity within 30 days and medium severity within 60 days.",
339
  "controlCategory": "Out-of-the-box",
340
+ "readableControlId": "DASF 38",
341
+ "severity": "medium",
342
+ "automationPlatforms": []
343
  },
344
  {
345
  "controlId": "DASF-39",
346
  "title": "Platform security \u2014 Incident Response Team",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
347
  "description": "Databricks has established a formal incident response plan that outlines key elements such as roles, responsibilities, escalation paths and external communication protocols. The platform handles over 9TB of audit logs daily, aiding customer and Databricks security investigations. A dedicated security incident response team operates an internal Databricks instance, consolidating essential log sources for thorough security analysis. Databricks ensures continual operational readiness with a 24/7/365 on-call rotation. Additionally, a proactive hunting program and a specialized detection team support the incident response program.",
348
  "controlCategory": "Out-of-the-box",
349
+ "readableControlId": "DASF 39",
350
+ "severity": "medium",
351
+ "automationPlatforms": []
352
  },
353
  {
354
  "controlId": "DASF-40",
355
  "title": "Platform security \u2014 internal access",
 
 
 
 
 
 
 
 
356
  "description": "Databricks personnel, by default, do not have access to customer workspaces or production environments. Access may be temporarily requested by Databricks staff for purposes such as investigating outages, security events or supporting deployments. Customers have the option to disable this access. Additionally, staff activity within these environments is recorded in customer audit logs. Accessing these areas requires multi-factor authentication, and employees must connect to the Databricks VPN.",
357
  "controlCategory": "Out-of-the-box",
358
+ "readableControlId": "DASF 40",
359
+ "severity": "medium",
360
+ "automationPlatforms": []
361
  },
362
  {
363
  "controlId": "DASF-41",
364
  "title": "Platform security \u2014 secure SDLC",
 
 
 
 
 
 
 
 
365
  "description": "Databricks engineering integrates security throughout the software development lifecycle (SDLC), encompassing both technical and process-level controls under the oversight of our chief security officer (CSO). Activities within our SDLC include: Code peer reviews, Static and dynamic scans for code and containers, including dependencies, Feature-level security reviews, Annual software engineering security training, and Cross-organizational collaborations between security, product management, product security and security champions. These development controls are augmented by internal and external penetration testing programs, with findings tracked for resolution and reported to our executive team. Databricks\u2019 processes undergo an independent annual review, the results of which are published in our SOC 2 Type 2 report, available upon request.",
366
  "controlCategory": "Out-of-the-box",
367
+ "readableControlId": "DASF 41",
368
+ "severity": "medium",
369
+ "automationPlatforms": []
370
  },
371
  {
372
  "controlId": "DASF-42",
373
  "title": "Employ data-centric MLOps and LLMOps",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
374
  "description": "MLOps enhances efficiency, scalability, security and risk reduction in machine learning projects. Databricks integrates with MLflow, focusing on enterprise reliability, security and scalability for managing the machine learning lifecycle. The latest update to MLflow introduces new LLMOps features for better management and deployment of large language models (LLMs). This includes integrations with Hugging Face Transformers, OpenAI and the external models in Mosaic AI Model Serving. MLflow also integrates with LangChain and a prompt engineering UI, facilitating generative AI application development for use cases such as chatbots, document summarization and text classification.",
375
  "controlCategory": "Implementation",
376
+ "readableControlId": "DASF 42",
377
+ "severity": "medium",
378
+ "automationPlatforms": []
379
  },
380
  {
381
  "controlId": "DASF-43",
382
  "title": "Use access control lists",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
383
  "description": "Databricks access control lists (ACLs) enable you to configure permissions for accessing and interacting with workspace objects, including folders, notebooks, experiments, models, clusters, pools, jobs, Delta Live Tables pipelines, alerts, dashboards, queries and SQL warehouses.",
384
  "controlCategory": "Implementation",
385
+ "readableControlId": "DASF 43",
386
+ "severity": "medium",
387
+ "automationPlatforms": []
388
  },
389
  {
390
  "controlId": "DASF-44",
391
  "title": "Triggering actions in response to a specific event",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
392
  "description": "Webhooks in the MLflow Model Registry enable you to automate machine learning workflow by triggering actions in response to specific events. These webhooks facilitate seamless integrations, allowing for the automatic execution of various processes. For example, webhooks are used for: CI workflow trigger (Validate your model automatically when creating a new version), Team notifications (Send alerts through a messaging app when a model stage transition request is received), Model fairness evaluation (Invoke a workflow to assess model fairness and bias upon a production transition request), and Automated deployment (Trigger a deployment pipeline when a new tag is created on a model).",
393
  "controlCategory": "Implementation",
394
+ "readableControlId": "DASF 44",
395
+ "severity": "medium",
396
+ "automationPlatforms": []
397
  },
398
  {
399
  "controlId": "DASF-45",
400
  "title": "Evaluate models",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
401
  "description": "Model evaluation is a critical component of the machine learning lifecycle. It provides data scientists with the tools to measure, interpret and explain the performance of their models. MLflow plays a critical role in accelerating model development by offering insights into the reasons behind a model\u2019s performance and guiding improvements and iterations. MLflow offers many industry-standard native evaluation metrics for classical machine learning algorithms and LLMs, and also facilitates the use of custom evaluation metrics.",
402
  "controlCategory": "Implementation",
403
+ "readableControlId": "DASF 45",
404
+ "severity": "medium",
405
+ "automationPlatforms": []
406
  },
407
  {
408
  "controlId": "DASF-46",
409
  "title": "Store and retrieve embeddings securely",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
410
  "description": "Mosaic AI Vector Search is a vector database that is built into the Databricks Data Intelligence Platform and integrated with its governance and productivity tools. A vector database is a database that is optimized to store and retrieve embeddings. Embeddings are mathematical representations of the semantic content of data, typically text or image data. Embeddings are usually generated by feature extraction models for text, image, audio or multi-modal data, and are a key component of many GenAI applications that depend on finding documents or images that are similar to each other. Examples are RAG systems, recommender systems, and image and video recognition. Databricks implements the following security controls to protect your data: Every customer request to Vector Search is logically isolated, authenticated and authorized, and Mosaic AI Vector Search encrypts all data at rest (AES-256) and in transit (TLS 1.2+).",
411
  "controlCategory": "Implementation",
412
+ "readableControlId": "DASF 46",
413
+ "severity": "medium",
414
+ "automationPlatforms": []
415
  },
416
  {
417
  "controlId": "DASF-47",
418
  "title": "Compare LLM outputs on set prompts",
 
 
 
 
 
 
 
 
419
  "description": "New, no-code visual tools allow users to compare models\u2019 output based on set prompts, which are automatically tracked within MLflow. With integration into Mosaic AI Model Serving, customers can deploy the best model to production. The AI Playground is a chat-like environment where you can test, prompt and compare LLMs.",
420
  "controlCategory": "Implementation",
421
+ "readableControlId": "DASF 47",
422
+ "severity": "medium",
423
+ "automationPlatforms": []
424
  },
425
  {
426
  "controlId": "DASF-48",
427
  "title": "Use hardened Runtime for Machine Learning",
 
 
 
 
 
 
 
 
428
  "description": "Databricks Runtime for Machine Learning (Databricks Runtime ML) now automates cluster creation with versatile infrastructure, encompassing pre-built ML/DL libraries and custom library integration. Enhanced scalability and cost management tools optimize performance and expenditure. The refined user interface caters to various expertise levels, while new collaboration features support team-based projects. Comprehensive training resources and detailed documentation complement these improvements.",
429
  "controlCategory": "Out-of-the-box",
430
+ "readableControlId": "DASF 48",
431
+ "severity": "medium",
432
+ "automationPlatforms": []
433
  },
434
  {
435
  "controlId": "DASF-49",
436
  "title": "Automate LLM evaluation",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
437
  "description": "The \u201cLLM-as-a-judge\u201d feature in MLflow 2.8 automates LLM evaluation, offering a practical alternative to human judgment. It\u2019s designed to be efficient and cost-effective, maintaining consistency with human scores. This tool supports various metrics, including standard and customizable GenAI metrics, and allows users to select an LLM as a judge and define specific grading criteria.",
438
  "controlCategory": "Implementation",
439
+ "readableControlId": "DASF 49",
440
+ "severity": "medium",
441
+ "automationPlatforms": []
442
  },
443
  {
444
  "controlId": "DASF-50",
445
  "title": "Platform compliance",
 
 
 
 
 
 
 
 
446
  "description": "Develop your solutions on a platform created using some of the most rigorous security and compliance standards in the world. Get independent audit reports verifying that Databricks adheres to security controls for ISO 27001, ISO 27018, SOC 1, SOC 2, FedRAMP, HITRUST, IRAP, etc.",
447
  "controlCategory": "Out-of-the-box",
448
+ "readableControlId": "DASF 50",
449
+ "severity": "medium",
450
+ "automationPlatforms": []
451
  },
452
  {
453
  "controlId": "DASF-51",
454
  "title": "Share data and AI assets securely",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
455
  "description": "Databricks Delta Sharing lets you share data and AI assets securely in Databricks with users outside your organization, whether those users use Databricks or not.",
456
  "controlCategory": "Out-of-the-box",
457
+ "readableControlId": "DASF 51",
458
+ "severity": "medium",
459
+ "automationPlatforms": []
460
  },
461
  {
462
  "controlId": "DASF-52",
463
  "title": "Source code control",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
464
  "description": "Databricks\u2019 Git Repository integration supports effective management of code and third-party libraries, enhancing customer control over their development environment.",
465
  "controlCategory": "Out-of-the-box",
466
+ "readableControlId": "DASF 52",
467
+ "severity": "medium",
468
+ "automationPlatforms": []
469
  },
470
  {
471
  "controlId": "DASF-53",
472
  "title": "Third-party library control",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
473
  "description": "Databricks\u2019 library management system allows administrators to manage the installation and usage of third-party libraries effectively. This feature enhances the security and efficiency of systems, pipelines and data by giving administrators precise control over their development environment.",
474
  "controlCategory": "Out-of-the-box",
475
+ "readableControlId": "DASF 53",
476
+ "severity": "medium",
477
+ "automationPlatforms": []
478
  }
479
  ]
frameworks/sample/controls.json ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ [
3
+ {
4
+ "controlId": "AI-DBA-1",
5
+ "title": "Data Bias Assessment and Mitigation",
6
+ "description": "The organization shall conduct comprehensive assessments to identify and mitigate potential biases in the data used to train AI systems. The following measures shall be implemented:\n\na. Bias Assessment Methodology: Establish a documented methodology for assessing biases in training data, including the specific types of biases to be evaluated (e.g., representational bias, sample bias, historical bias) and the techniques used to identify them (e.g., statistical analysis, fairness metrics).\n\nb. Bias Assessment Frequency: Conduct data bias assessments at regular intervals, such as prior to the initial use of the data for training, whenever significant changes are made to the data, and at least annually.\n\nc. Bias Assessment Reporting: Document the results of data bias assessments, including identified biases, their potential impact on AI system outcomes, and recommended mitigation strategies.\n\nd. Bias Mitigation Planning: Develop and maintain a bias mitigation plan that outlines the specific actions to be taken to address identified biases in the training data. This may include techniques such as data resampling, data augmentation, or the use of bias mitigation algorithms.\n\ne. Bias Mitigation Implementation: Implement the bias mitigation plan and document the actions taken to reduce or eliminate identified biases in the training data.\n\nf. Ongoing Monitoring: Establish processes for ongoing monitoring of AI system outcomes to detect and respond to any emergent biases that may arise over time.",
7
+ "controlCategory": "Data Bias",
8
+ "readableControlId": "AI-DBA-1",
9
+ "severity": "medium",
10
+ "automationPlatforms": [],
11
+ "criteria": [
12
+ {
13
+ "criteriaId": "1",
14
+ "title": "Bias Assessment Methodology Criteria",
15
+ "description": "1.1. Documented methodology for assessing biases in training data.\n1.2. Identification of specific types of biases to be evaluated (e.g., representational bias, sample bias, historical bias).\n1.3. Description of techniques used to identify biases (e.g., statistical analysis, fairness metrics)."
16
+ },
17
+ {
18
+ "criteriaId": "2",
19
+ "title": "Bias Assessment Frequency Criteria",
20
+ "description": "2.1. Documented schedule for conducting data bias assessments.\n2.2. Evidence of bias assessments performed prior to initial data use, when significant changes are made, and at least annually."
21
+ },
22
+ {
23
+ "criteriaId": "3",
24
+ "title": "Bias Assessment Reporting Criteria",
25
+ "description": "3.1. Documented results of data bias assessments, including identified biases and their potential impact on AI system outcomes.\n3.2. Recommendations for bias mitigation strategies based on assessment findings."
26
+ },
27
+ {
28
+ "criteriaId": "4",
29
+ "title": "Bias Mitigation Planning Criteria",
30
+ "description": "4.1. Documented bias mitigation plan outlining specific actions to address identified biases.\n4.2. Inclusion of techniques such as data resampling, data augmentation, or bias mitigation algorithms in the mitigation plan."
31
+ },
32
+ {
33
+ "criteriaId": "5",
34
+ "title": "Bias Mitigation Implementation Criteria",
35
+ "description": "5.1. Evidence of the implementation of the bias mitigation plan.\n5.2. Documentation of actions taken to reduce or eliminate identified biases in the training data."
36
+ },
37
+ {
38
+ "criteriaId": "6",
39
+ "title": "Ongoing Monitoring Criteria",
40
+ "description": "6.1. Established processes for ongoing monitoring of AI system outcomes to detect emergent biases.\n6.2. Documentation of any biases identified through ongoing monitoring and actions taken to address them."
41
+ }
42
+ ]
43
+ },
44
+ {
45
+ "controlId": "AI-DBA-2",
46
+ "title": "Data Collection and Preprocessing",
47
+ "description": "The organization shall ensure that data collection and preprocessing steps are designed to minimize the introduction of biases and ensure data quality. The following measures shall be implemented:\n\na. Data Source Selection: Identify and select diverse and representative data sources to reduce the risk of biases arising from limited or skewed data.\n\nb. Data Sampling Techniques: Employ appropriate data sampling techniques, such as stratified sampling or oversampling, to ensure balanced representation of different groups or classes in the training data.\n\nc. Data Quality Checks: Implement data quality checks to identify and address issues such as missing values, outliers, inconsistencies, and errors in the collected data.\n\nd. Data Preprocessing Guidelines: Establish and follow guidelines for data preprocessing tasks, including data cleaning, normalization, and feature selection, to maintain data integrity and reduce the introduction of biases.\n\ne. Data Labeling and Annotation: Ensure that data labeling and annotation processes are performed consistently and objectively, with clear guidelines and quality control measures to minimize the introduction of biases.\n\nf. Data Documentation: Maintain comprehensive documentation of data collection and preprocessing steps, including data sources, sampling methods, preprocessing techniques, and any assumptions made during the process.",
48
+ "controlCategory": "Data Bias",
49
+ "readableControlId": "AI-DBA-2",
50
+ "severity": "medium",
51
+ "automationPlatforms": [],
52
+ "criteria": [
53
+ {
54
+ "criteriaId": "1",
55
+ "title": "Data Source Selection Criteria",
56
+ "description": "1.1. Identification and selection of diverse and representative data sources.\n1.2. Documentation of the rationale for data source selection.\n1.3. Evidence of efforts to mitigate biases arising from limited or skewed data sources."
57
+ },
58
+ {
59
+ "criteriaId": "2",
60
+ "title": "Data Sampling Techniques Criteria",
61
+ "description": "2.1. Documented data sampling techniques used to ensure balanced representation.\n2.2. Justification for the selected sampling techniques based on data characteristics and bias mitigation goals.\n2.3. Evidence of the application of appropriate sampling techniques during data collection and preprocessing."
62
+ },
63
+ {
64
+ "criteriaId": "3",
65
+ "title": "Data Quality Checks Criteria",
66
+ "description": "3.1. Documented processes for performing data quality checks.\n3.2. Identification of specific data quality issues to be addressed (e.g., missing values, outliers, inconsistencies).\n3.3. Evidence of the execution of data quality checks and the resolution of identified issues."
67
+ },
68
+ {
69
+ "criteriaId": "4",
70
+ "title": "Data Preprocessing Guidelines Criteria",
71
+ "description": "4.1. Established guidelines for data preprocessing tasks, including data cleaning, normalization, and feature selection.\n4.2. Documentation of the rationale behind the preprocessing guidelines.\n4.3. Evidence of adherence to the preprocessing guidelines during the data preparation process."
72
+ },
73
+ {
74
+ "criteriaId": "5",
75
+ "title": "Data Labeling and Annotation Criteria",
76
+ "description": "5.1. Documented guidelines for consistent and objective data labeling and annotation.\n5.2. Quality control measures to ensure the accuracy and consistency of labeled and annotated data.\n5.3. Evidence of the application of the labeling and annotation guidelines during the data preparation process."
77
+ },
78
+ {
79
+ "criteriaId": "6",
80
+ "title": "Data Documentation Criteria",
81
+ "description": "6.1. Comprehensive documentation of data collection and preprocessing steps.\n6.2. Inclusion of information on data sources, sampling methods, preprocessing techniques, and assumptions made.\n6.3. Regular updates to the documentation to reflect any changes in the data collection and preprocessing processes."
82
+ }
83
+ ]
84
+ },
85
+ {
86
+ "controlId": "AI-DBA-3",
87
+ "title": "Model Development and Training",
88
+ "description": "The organization shall ensure that the model development and training phase incorporates measures to mitigate biases and promote fairness. The following measures shall be implemented:\n\na. Algorithm Selection: Select appropriate algorithms that are less prone to amplifying biases present in the training data. Consider the use of fairness-aware algorithms or algorithms with built-in bias mitigation techniques.\n\nb. Hyperparameter Tuning: Conduct thorough hyperparameter tuning to optimize model performance while considering fairness metrics. Evaluate the impact of different hyperparameter settings on bias mitigation.\n\nc. Bias Mitigation Techniques: Incorporate bias mitigation techniques during model training, such as regularization, adversarial debiasing, or fairness constraints. Document the specific techniques applied and their effectiveness in reducing biases.\n\nd. Training Data Balancing: Ensure that the training data is balanced and representative of the target population. Apply techniques like resampling, oversampling, or undersampling to address class imbalances that may contribute to biases.\n\ne. Fairness Metrics: Incorporate fairness metrics during model training and validation to assess the model's performance in terms of fairness and bias mitigation. Use appropriate fairness metrics based on the specific context and requirements of the AI system.\n\nf. Model Transparency: Maintain transparency in the model development process by documenting the algorithms used, hyperparameter settings, bias mitigation techniques applied, and any assumptions made during training.",
89
+ "controlCategory": "Data Bias",
90
+ "readableControlId": "AI-DBA-3",
91
+ "severity": "medium",
92
+ "automationPlatforms": [],
93
+ "criteria": [
94
+ {
95
+ "criteriaId": "1",
96
+ "title": "Algorithm Selection Criteria",
97
+ "description": "1.1. Documentation of the rationale for selecting specific algorithms, considering their potential impact on bias mitigation.\n1.2. Evidence of the use of fairness-aware algorithms or algorithms with built-in bias mitigation techniques, where applicable.\n1.3. Justification for the chosen algorithms based on their suitability for the specific AI system and bias mitigation requirements."
98
+ },
99
+ {
100
+ "criteriaId": "2",
101
+ "title": "Hyperparameter Tuning Criteria",
102
+ "description": "2.1. Documentation of the hyperparameter tuning process, including the range of hyperparameters explored and the evaluation metrics used.\n2.2. Evidence of considering fairness metrics during hyperparameter tuning.\n2.3. Analysis of the impact of different hyperparameter settings on bias mitigation and model performance."
103
+ },
104
+ {
105
+ "criteriaId": "3",
106
+ "title": "Bias Mitigation Techniques Criteria",
107
+ "description": "3.1. Documentation of the specific bias mitigation techniques applied during model training.\n3.2. Evaluation of the effectiveness of the applied bias mitigation techniques using appropriate fairness metrics.\n3.3. Justification for the selection of specific bias mitigation techniques based on their suitability for the AI system and the types of biases being addressed."
108
+ },
109
+ {
110
+ "criteriaId": "4",
111
+ "title": "Training Data Balancing Criteria",
112
+ "description": "4.1. Documentation of the techniques used to balance the training data, such as resampling, oversampling, or undersampling.\n4.2. Analysis of the impact of data balancing techniques on model performance and bias mitigation.\n4.3. Evidence of efforts to ensure that the training data is representative of the target population."
113
+ },
114
+ {
115
+ "criteriaId": "5",
116
+ "title": "Fairness Metrics Criteria",
117
+ "description": "5.1. Identification of appropriate fairness metrics based on the specific context and requirements of the AI system.\n5.2. Documentation of the fairness metrics used during model training and validation.\n5.3. Evaluation of the model's performance in terms of fairness and bias mitigation using the selected fairness metrics."
118
+ },
119
+ {
120
+ "criteriaId": "6",
121
+ "title": "Model Transparency Criteria",
122
+ "description": "6.1. Comprehensive documentation of the model development process, including algorithms used, hyperparameter settings, and bias mitigation techniques applied.\n6.2. Clear documentation of any assumptions made during model training.\n6.3. Availability of model documentation to relevant stakeholders for transparency and accountability."
123
+ }
124
+ ]
125
+ },
126
+ {
127
+ "controlId": "AI-DBA-4",
128
+ "title": "Model Evaluation and Testing",
129
+ "description": "The organization shall conduct comprehensive evaluations and testing of trained AI models to assess their performance, fairness, and the presence of any residual biases. The following measures shall be implemented:\n\na. Evaluation Metrics Selection: Select appropriate evaluation metrics that cover both performance and fairness aspects of the AI model. Include metrics such as accuracy, precision, recall, F1-score, as well as fairness metrics like demographic parity, equalized odds, or equal opportunity.\n\nb. Testing Methodology: Establish a robust testing methodology that includes techniques such as cross-validation, holdout validation, or stratified sampling to assess the model's performance and fairness across different subsets of the data.\n\nc. Bias Testing: Conduct targeted bias testing to evaluate the model's performance across different protected attributes or sensitive groups. Assess the model's fairness and identify any disparities in outcomes or error rates across these groups.\n\nd. Threshold Analysis: Perform threshold analysis to determine the appropriate decision thresholds for the AI model, considering both performance and fairness metrics. Evaluate the impact of different threshold settings on the model's fairness and accuracy.\n\ne. Residual Bias Assessment: Assess the presence of any residual biases in the trained model that may not have been fully mitigated during the training phase. Identify the sources and magnitude of these biases and develop strategies for further mitigation.\n\nf. Model Validation: Validate the AI model's performance and fairness on independent test datasets that were not used during training. Ensure that the model generalizes well to unseen data and maintains its fairness properties.",
130
+ "controlCategory": "Data Bias",
131
+ "readableControlId": "AI-DBA-4",
132
+ "severity": "medium",
133
+ "automationPlatforms": [],
134
+ "criteria": [
135
+ {
136
+ "criteriaId": "1",
137
+ "title": "Evaluation Metrics Selection Criteria",
138
+ "description": "1.1. Documentation of the selected evaluation metrics, including both performance and fairness metrics.\n1.2. Justification for the choice of evaluation metrics based on the specific requirements and context of the AI system.\n1.3. Evidence of the use of appropriate fairness metrics, such as demographic parity, equalized odds, or equal opportunity."
139
+ },
140
+ {
141
+ "criteriaId": "2",
142
+ "title": "Testing Methodology Criteria",
143
+ "description": "2.1. Documented testing methodology, including techniques such as cross-validation, holdout validation, or stratified sampling.\n2.2. Evidence of the application of the testing methodology to assess the model's performance and fairness across different data subsets.\n2.3. Analysis of the results obtained from the testing methodology, including performance and fairness metrics."
144
+ },
145
+ {
146
+ "criteriaId": "3",
147
+ "title": "Bias Testing Criteria",
148
+ "description": "3.1. Documentation of the targeted bias testing conducted, including the protected attributes or sensitive groups considered.\n3.2. Evaluation of the model's fairness and identification of any disparities in outcomes or error rates across different groups.\n3.3. Evidence of the use of appropriate fairness metrics during bias testing."
149
+ },
150
+ {
151
+ "criteriaId": "4",
152
+ "title": "Threshold Analysis Criteria",
153
+ "description": "4.1. Documentation of the threshold analysis performed, including the range of threshold values considered.\n4.2. Evaluation of the impact of different threshold settings on the model's fairness and accuracy.\n4.3. Justification for the selected decision thresholds based on the balance between performance and fairness requirements."
154
+ },
155
+ {
156
+ "criteriaId": "5",
157
+ "title": "Residual Bias Assessment Criteria",
158
+ "description": "5.1. Assessment of the presence of any residual biases in the trained model.\n5.2. Identification of the sources and magnitude of residual biases.\n5.3. Development of strategies and action plans for further bias mitigation based on the residual bias assessment findings."
159
+ },
160
+ {
161
+ "criteriaId": "6",
162
+ "title": "Model Validation Criteria",
163
+ "description": "6.1. Validation of the AI model's performance and fairness on independent test datasets.\n6.2. Evidence of the model's ability to generalize well to unseen data while maintaining its fairness properties.\n6.3. Documentation of any discrepancies or limitations identified during the model validation process."
164
+ }
165
+ ]
166
+ },
167
+ {
168
+ "controlId": "AI-DBA-5",
169
+ "title": "Deployment and Monitoring",
170
+ "description": "The organization shall ensure that the deployed AI system is continuously monitored for any emerging biases or fairness issues. The following measures shall be implemented:\n\na. Monitoring Plan: Establish a comprehensive monitoring plan that outlines the key metrics, data sources, and frequency of monitoring for the deployed AI system. The plan should cover both performance and fairness aspects of the system.\n\nb. Monitoring Mechanisms: Implement automated monitoring mechanisms to continuously collect and analyze data from the deployed AI system. These mechanisms should be designed to detect any deviations from the expected performance or fairness metrics.\n\nc. Fairness Drift Detection: Monitor for fairness drift, which refers to the gradual degradation of the AI system's fairness properties over time. Implement techniques to detect and quantify fairness drift, such as statistical tests or comparison with baseline fairness metrics.\n\nd. Bias Incident Response: Establish processes and protocols for promptly addressing any biases or fairness issues identified during monitoring. This includes conducting root cause analysis, developing mitigation strategies, and implementing necessary updates or adjustments to the AI system.\n\ne. Monitoring Reporting: Generate regular monitoring reports that provide insights into the AI system's performance and fairness metrics. These reports should be reviewed by relevant stakeholders and used to inform decision-making and continuous improvement efforts.\n\nf. Stakeholder Feedback: Establish channels for collecting and incorporating feedback from stakeholders, including users, customers, and impacted communities. Regularly solicit feedback on the AI system's fairness, transparency, and accountability, and use this feedback to guide monitoring and improvement efforts.",
171
+ "controlCategory": "Data Bias",
172
+ "readableControlId": "AI-DBA-5",
173
+ "severity": "medium",
174
+ "automationPlatforms": [],
175
+ "criteria": [
176
+ {
177
+ "criteriaId": "1",
178
+ "title": "Monitoring Plan Criteria",
179
+ "description": "1.1. Documented monitoring plan that outlines the key metrics, data sources, and frequency of monitoring.\n1.2. Coverage of both performance and fairness aspects in the monitoring plan.\n1.3. Justification for the selected monitoring metrics and frequencies based on the specific requirements and context of the AI system."
180
+ },
181
+ {
182
+ "criteriaId": "2",
183
+ "title": "Monitoring Mechanisms Criteria",
184
+ "description": "2.1. Implementation of automated monitoring mechanisms to continuously collect and analyze data from the deployed AI system.\n2.2. Evidence of the effectiveness of the monitoring mechanisms in detecting deviations from expected performance or fairness metrics.\n2.3. Documentation of the monitoring data collected and the analysis performed."
185
+ },
186
+ {
187
+ "criteriaId": "3",
188
+ "title": "Fairness Drift Detection Criteria",
189
+ "description": "3.1. Implementation of techniques to detect and quantify fairness drift in the deployed AI system.\n3.2. Regular monitoring and assessment of fairness drift using appropriate statistical tests or comparison with baseline fairness metrics.\n3.3. Documentation of any identified instances of fairness drift and the actions taken to address them."
190
+ },
191
+ {
192
+ "criteriaId": "4",
193
+ "title": "Bias Incident Response Criteria",
194
+ "description": "4.1. Established processes and protocols for promptly addressing biases or fairness issues identified during monitoring.\n4.2. Evidence of conducting root cause analysis and developing mitigation strategies for identified bias incidents.\n4.3. Documentation of the actions taken to address bias incidents, including updates or adjustments made to the AI system."
195
+ },
196
+ {
197
+ "criteriaId": "5",
198
+ "title": "Monitoring Reporting Criteria",
199
+ "description": "5.1. Generation of regular monitoring reports that provide insights into the AI system's performance and fairness metrics.\n5.2. Distribution of monitoring reports to relevant stakeholders for review and decision-making.\n5.3. Evidence of using monitoring reports to inform continuous improvement efforts and guide necessary updates to the AI system."
200
+ },
201
+ {
202
+ "criteriaId": "6",
203
+ "title": "Stakeholder Feedback Criteria",
204
+ "description": "6.1. Establishment of channels for collecting and incorporating feedback from stakeholders, including users, customers, and impacted communities.\n6.2. Regular solicitation of feedback on the AI system's fairness, transparency, and accountability.\n6.3. Documentation of how stakeholder feedback is used to guide monitoring and improvement efforts."
205
+ }
206
+ ]
207
+ }
208
+ ]
209
+ ]
frameworks/sample/framework.json ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "framework": {
3
+ "name": "AI Bias Assessment and Mitigation Framework",
4
+ "description": "A framework for assessing and mitigating bias in AI systems throughout the development lifecycle.",
5
+ "stages": [
6
+ {
7
+ "stageName": "Data Collection and Preprocessing",
8
+ "systemComponents": [
9
+ {
10
+ "componentName": "Data Source Selection",
11
+ "risks": [
12
+ {
13
+ "riskId": "1.1",
14
+ "title": "Biased Data Sources",
15
+ "definition": "The selected data sources may contain inherent biases or lack diversity, leading to biased AI models.",
16
+ "addressedByControls": ["AI-DBA-2"]
17
+ }
18
+ ]
19
+ },
20
+ {
21
+ "componentName": "Data Preprocessing",
22
+ "risks": [
23
+ {
24
+ "riskId": "1.2",
25
+ "title": "Preprocessing-Induced Bias",
26
+ "definition": "Data preprocessing techniques, such as feature selection or data cleaning, may introduce or amplify biases in the data.",
27
+ "addressedByControls": ["AI-DBA-2"]
28
+ }
29
+ ]
30
+ },
31
+ {
32
+ "componentName": "Data Bias Assessment",
33
+ "risks": [
34
+ {
35
+ "riskId": "1.3",
36
+ "title": "Inadequate Bias Assessment",
37
+ "definition": "Failure to conduct comprehensive assessments to identify potential biases in the training data may result in biased AI models.",
38
+ "addressedByControls": ["AI-DBA-1"]
39
+ }
40
+ ]
41
+ }
42
+ ]
43
+ },
44
+ {
45
+ "stageName": "Model Development and Training",
46
+ "systemComponents": [
47
+ {
48
+ "componentName": "Algorithm Selection",
49
+ "risks": [
50
+ {
51
+ "riskId": "2.1",
52
+ "title": "Algorithmic Bias",
53
+ "definition": "The chosen algorithms may have inherent biases or may amplify biases present in the training data.",
54
+ "addressedByControls": ["AI-DBA-3"]
55
+ }
56
+ ]
57
+ },
58
+ {
59
+ "componentName": "Model Training",
60
+ "risks": [
61
+ {
62
+ "riskId": "2.2",
63
+ "title": "Training Data Bias",
64
+ "definition": "The training data used to develop the AI model may contain biases, leading to biased model outputs.",
65
+ "addressedByControls": ["AI-DBA-3"]
66
+ }
67
+ ]
68
+ }
69
+ ]
70
+ },
71
+ {
72
+ "stageName": "Model Evaluation and Testing",
73
+ "systemComponents": [
74
+ {
75
+ "componentName": "Performance Evaluation",
76
+ "risks": [
77
+ {
78
+ "riskId": "3.1",
79
+ "title": "Inadequate Performance Metrics",
80
+ "definition": "The selected performance metrics may not adequately capture the fairness and bias aspects of the AI model.",
81
+ "addressedByControls": ["AI-DBA-4"]
82
+ }
83
+ ]
84
+ },
85
+ {
86
+ "componentName": "Bias Testing",
87
+ "risks": [
88
+ {
89
+ "riskId": "3.2",
90
+ "title": "Undetected Residual Bias",
91
+ "definition": "The testing process may fail to identify and quantify residual biases present in the trained AI model.",
92
+ "addressedByControls": ["AI-DBA-4"]
93
+ }
94
+ ]
95
+ }
96
+ ]
97
+ },
98
+ {
99
+ "stageName": "Deployment and Monitoring",
100
+ "systemComponents": [
101
+ {
102
+ "componentName": "Model Deployment",
103
+ "risks": [
104
+ {
105
+ "riskId": "4.1",
106
+ "title": "Fairness Drift",
107
+ "definition": "The fairness properties of the AI model may degrade over time due to changes in the underlying data or environment.",
108
+ "addressedByControls": ["AI-DBA-5"]
109
+ }
110
+ ]
111
+ },
112
+ {
113
+ "componentName": "Monitoring and Feedback",
114
+ "risks": [
115
+ {
116
+ "riskId": "4.2",
117
+ "title": "Insufficient Monitoring",
118
+ "definition": "The monitoring processes may not effectively detect emerging biases or fairness issues in the deployed AI system.",
119
+ "addressedByControls": ["AI-DBA-5"]
120
+ }
121
+ ]
122
+ }
123
+ ]
124
+ }
125
+ ]
126
+ }
127
+ }
schema/controls_schema.json CHANGED
@@ -11,38 +11,52 @@
11
  "title": {
12
  "type": "string"
13
  },
14
- "risks": {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  "type": "array",
16
  "items": {
17
  "type": "object",
18
  "properties": {
19
- "component": {
20
  "type": "string"
21
  },
22
- "identifier": {
23
- "type": "string",
24
- "pattern": "^[0-9]+\\.[0-9]+$"
25
- },
26
- "riskId": {
27
  "type": "string"
28
  },
29
- "summary": {
30
  "type": "string"
31
  }
32
  },
33
- "required": ["component", "identifier", "riskId", "summary"]
34
  }
35
- },
36
- "description": {
37
- "type": "string"
38
- },
39
- "controlCategory": {
40
- "type": "string"
41
- },
42
- "readableControlId": {
43
- "type": "string"
44
  }
45
  },
46
- "required": ["controlId", "title", "risks", "description", "controlCategory", "readableControlId"]
 
 
 
 
 
 
 
 
47
  }
48
  }
 
11
  "title": {
12
  "type": "string"
13
  },
14
+ "description": {
15
+ "type": "string"
16
+ },
17
+ "controlCategory": {
18
+ "type": "string"
19
+ },
20
+ "readableControlId": {
21
+ "type": "string"
22
+ },
23
+ "severity": {
24
+ "type": "string",
25
+ "enum": ["low", "medium", "high"]
26
+ },
27
+ "automationPlatforms": {
28
+ "type": "array",
29
+ "items": {
30
+ "type": "string"
31
+ }
32
+ },
33
+ "criteria": {
34
  "type": "array",
35
  "items": {
36
  "type": "object",
37
  "properties": {
38
+ "criteriaId": {
39
  "type": "string"
40
  },
41
+ "title": {
 
 
 
 
42
  "type": "string"
43
  },
44
+ "description": {
45
  "type": "string"
46
  }
47
  },
48
+ "required": ["criteriaId", "title", "description"]
49
  }
 
 
 
 
 
 
 
 
 
50
  }
51
  },
52
+ "required": [
53
+ "controlId",
54
+ "title",
55
+ "description",
56
+ "controlCategory",
57
+ "readableControlId",
58
+ "severity",
59
+ "criteria"
60
+ ]
61
  }
62
  }