backnotprop tmccoy14 committed on
Commit
cc18926
1 Parent(s): 18071a6

Update frameworks/dasf/framework.json (#4)

Browse files

- Update frameworks/dasf/framework.json (5886049f1817bfb9dcb74e2640f5bea0d1686dde)


Co-authored-by: Tucker McCoy <tmccoy14@users.noreply.huggingface.co>

Files changed (1) hide show
  1. frameworks/dasf/framework.json +26 -26
frameworks/dasf/framework.json CHANGED
@@ -670,7 +670,7 @@
670
  "description": "Synchronizing users and groups from your identity provider (IdP) with Databricks using the SCIM standard facilitates consistent and automated user provisioning for enhancing security.",
671
  "controlCategory": "Configuration",
672
  "readableControlId": "DASF 2",
673
- "severity": "medium",
674
  "automationPlatforms": ["AZURE_DATABRICKS"],
675
  "criteria": []
676
  },
@@ -680,7 +680,7 @@
680
  "description": "Configure IP access lists to restrict authentication to Databricks from specific IP ranges, such as VPNs or office networks, and strengthen network security by preventing unauthorized access from untrusted locations.",
681
  "controlCategory": "Configuration",
682
  "readableControlId": "DASF 3",
683
- "severity": "medium",
684
  "automationPlatforms": ["AZURE_DATABRICKS"],
685
  "criteria": []
686
  },
@@ -690,7 +690,7 @@
690
  "description": "Use AWS PrivateLink, Azure Private Link or GCP Private Service Connect to create a private network route between the customer and the Databricks control plane or the control plane and the customer\u2019s compute plane environments to enhance data security by avoiding public internet exposure.",
691
  "controlCategory": "Configuration",
692
  "readableControlId": "DASF 4",
693
- "severity": "medium",
694
  "automationPlatforms": ["AZURE_DATABRICKS"],
695
  "criteria": []
696
  },
@@ -700,7 +700,7 @@
700
  "description": "Implementing Unity Catalog for unified permissions management and assets simplifies access control and enhances security.",
701
  "controlCategory": "Implementation",
702
  "readableControlId": "DASF 5",
703
- "severity": "medium",
704
  "automationPlatforms": ["AZURE_DATABRICKS"],
705
  "criteria": []
706
  },
@@ -710,7 +710,7 @@
710
  "description": "Tags are attributes containing keys and optional values that you can apply to different securable objects in Unity Catalog. Organizing securable objects with tags in Unity Catalog aids in efficient data management, data discovery and classification, essential for handling large datasets.",
711
  "controlCategory": "Implementation",
712
  "readableControlId": "DASF 6",
713
- "severity": "medium",
714
  "automationPlatforms": ["AZURE_DATABRICKS"],
715
  "criteria": []
716
  },
@@ -730,7 +730,7 @@
730
  "description": "Databricks supports customer-managed encryption keys to strengthen data at rest protection and greater access control.",
731
  "controlCategory": "Configuration",
732
  "readableControlId": "DASF 8",
733
- "severity": "medium",
734
  "automationPlatforms": ["AZURE_DATABRICKS"],
735
  "criteria": []
736
  },
@@ -740,7 +740,7 @@
740
  "description": "Databricks supports TLS 1.2+ encryption to protect customer data during transit. This applies to data transfer between the customer and the Databricks control plane and within the compute plane. Customers can also secure inter-cluster communications within the compute plane per their security requirements.",
741
  "controlCategory": "Out-of-the-box",
742
  "readableControlId": "DASF 9",
743
- "severity": "medium",
744
  "automationPlatforms": ["AZURE_DATABRICKS"],
745
  "criteria": []
746
  },
@@ -750,7 +750,7 @@
750
  "description": "Store data in a lakehouse architecture using Delta tables. Delta tables can be versioned to revert any user\u2019s or malicious actor\u2019s poisoning of data. Data can be stored in a lakehouse architecture in the customer\u2019s cloud account. Both raw data and feature tables are stored as Delta tables with access controls to determine who can read and modify them. Data lineage with UC helps track and audit changes and the origin of ML data sources. Each operation that modifies a Delta Lake table creates a new table version. User actions are tracked and audited, and lineage of transformations is available all in the same platform. You can use history information to audit operations, roll back a table or query a table at a specific point in time using time travel.",
751
  "controlCategory": "Implementation",
752
  "readableControlId": "DASF 10",
753
- "severity": "medium",
754
  "automationPlatforms": ["AZURE_DATABRICKS"],
755
  "criteria": []
756
  },
@@ -810,7 +810,7 @@
810
  "description": "Databricks Feature Store is a centralized repository that enables data scientists to find and share features and also ensures that the same code used to compute the feature values is used for model training and inference. Unity Catalog\u2019s capabilities, such as security, lineage, table history, tagging and cross-workspace access, are automatically available to the feature table to reduce the risk of malicious actors manipulating the features that feed into ML training.",
811
  "controlCategory": "Implementation",
812
  "readableControlId": "DASF 16",
813
- "severity": "medium",
814
  "automationPlatforms": ["AZURE_DATABRICKS"],
815
  "criteria": []
816
  },
@@ -820,7 +820,7 @@
820
  "description": "MLflow with Delta Lake tracks the training data used for ML model training. It also enables the identification of specific ML models and runs derived from particular datasets for regulatory and auditable attribution.",
821
  "controlCategory": "Configuration",
822
  "readableControlId": "DASF 17",
823
- "severity": "medium",
824
  "automationPlatforms": ["AZURE_DATABRICKS"],
825
  "criteria": []
826
  },
@@ -830,7 +830,7 @@
830
  "description": "With Unity Catalog, organizations can implement a unified governance framework for their structured and unstructured data, machine learning models, notebooks, features, functions, and files, enhancing security and compliance across clouds and platforms.",
831
  "controlCategory": "Configuration",
832
  "readableControlId": "DASF 18",
833
- "severity": "medium",
834
  "automationPlatforms": ["AZURE_DATABRICKS"],
835
  "criteria": []
836
  },
@@ -840,7 +840,7 @@
840
  "description": "Databricks includes a managed version of MLflow featuring enterprise security controls and high availability. It supports functionalities like experiments, run management and notebook revision capture. MLflow on Databricks allows tracking and measuring machine learning model training runs, logging model training artifacts and securing machine learning projects.",
841
  "controlCategory": "Implementation",
842
  "readableControlId": "DASF 19",
843
- "severity": "medium",
844
  "automationPlatforms": ["AZURE_DATABRICKS"],
845
  "criteria": []
846
  },
@@ -860,7 +860,7 @@
860
  "description": "Databricks Lakehouse Monitoring offers a single pane of glass to centrally track tables\u2019 data quality and statistical properties and automatically classifies data. It can also track the performance of machine learning models and model serving endpoints by monitoring inference tables containing model inputs and predictions through a single pane of glass.",
861
  "controlCategory": "Implementation",
862
  "readableControlId": "DASF 21",
863
- "severity": "medium",
864
  "automationPlatforms": ["AZURE_DATABRICKS"],
865
  "criteria": []
866
  },
@@ -900,7 +900,7 @@
900
  "description": "Generating relevant and accurate responses in large language models (LLMs) while avoiding hallucinations requires grounding them in domain-specific knowledge. Retrieval augmented generation (RAG) addresses this by breaking down extensive datasets into manageable segments (\u201cchunks\u201d) that are \u201cvector embedded.\u201d These vector embeddings are mathematical representations that help the model understand and quantify different data segments. As a result, LLMs produce responses that are contextually relevant and deeply rooted in the specific domain knowledge.",
901
  "controlCategory": "Implementation",
902
  "readableControlId": "DASF 25",
903
- "severity": "medium",
904
  "automationPlatforms": ["AZURE_DATABRICKS"],
905
  "criteria": []
906
  },
@@ -910,7 +910,7 @@
910
  "description": "Data is your competitive advantage. Use it to customize large AI models to beat your competition. Produce new model variants with tailored LLM response style and structure via fine-tuning. Fine-tune your own LLM with open models to own your IP.",
911
  "controlCategory": "Implementation",
912
  "readableControlId": "DASF 26",
913
- "severity": "medium",
914
  "automationPlatforms": ["AZURE_DATABRICKS"],
915
  "criteria": []
916
  },
@@ -960,7 +960,7 @@
960
  "description": "Model serving involves risks of unauthorized data access and model tampering, which can compromise the integrity and reliability of machine learning deployments. Mosaic AI Model Serving addresses these concerns by providing secure-by-default REST API endpoints for MLflow machine learning models, featuring autoscaling, high availability and low latency.",
961
  "controlCategory": "Out-of-the-box",
962
  "readableControlId": "DASF 31",
963
- "severity": "medium",
964
  "automationPlatforms": ["AZURE_DATABRICKS"],
965
  "criteria": []
966
  },
@@ -970,7 +970,7 @@
970
  "description": "External models are third-party models hosted outside of Databricks. Supported by Model Serving AI Gateway, Databricks external models via the AI Gateway allow you to streamline the usage and management of various large language model (LLM) providers, such as OpenAI and Anthropic, within an organization. You can also use Mosaic AI Model Serving as a provider to serve predictive ML models, which offers rate limits for those endpoints. As part of this support, Model Serving offers a high-level interface that simplifies the interaction with these services by providing a unified endpoint to handle specific LLM-related requests. In addition, Databricks support for external models provides centralized credential management. By storing API keys in one secure location, organizations can enhance their security posture by minimizing the exposure of sensitive API keys throughout the system. It also helps to prevent exposing these keys within code or requiring end users to manage keys safely.",
971
  "controlCategory": "Out-of-the-box",
972
  "readableControlId": "DASF 32",
973
- "severity": "medium",
974
  "automationPlatforms": ["AZURE_DATABRICKS"],
975
  "criteria": []
976
  },
@@ -1010,7 +1010,7 @@
1010
  "description": "Databricks SQL alerts can monitor the metrics table for security-based conditions, ensuring data integrity and timely response to potential issues: Statistic range Alert: Triggers when a specific statistic, such as the fraction of missing values, exceeds a predetermined threshold. Data distribution shift alert: Activates upon shifts in data distribution, as indicated by the drift metrics table. Baseline divergence alert: Alerts if data significantly diverges from a baseline, suggesting potential needs for data analysis or model retraining, particularly in InferenceLog analysis.",
1011
  "controlCategory": "Implementation",
1012
  "readableControlId": "DASF 36",
1013
- "severity": "medium",
1014
  "automationPlatforms": ["AZURE_DATABRICKS"],
1015
  "criteria": []
1016
  },
@@ -1040,7 +1040,7 @@
1040
  "description": "Databricks has established a formal incident response plan that outlines key elements such as roles, responsibilities, escalation paths and external communication protocols. The platform handles over 9TB of audit logs daily, aiding customer and Databricks security investigations. A dedicated security incident response team operates an internal Databricks instance, consolidating essential log sources for thorough security analysis. Databricks ensures continual operational readiness with a 24/7/365 on-call rotation. Additionally, a proactive hunting program and a specialized detection team support the incident response program.",
1041
  "controlCategory": "Out-of-the-box",
1042
  "readableControlId": "DASF 39",
1043
- "severity": "medium",
1044
  "automationPlatforms": ["AZURE_DATABRICKS"],
1045
  "criteria": []
1046
  },
@@ -1050,7 +1050,7 @@
1050
  "description": "Databricks personnel, by default, do not have access to customer workspaces or production environments. Access may be temporarily requested by Databricks staff for purposes such as investigating outages, security events or supporting deployments. Customers have the option to disable this access. Additionally, staff activity within these environments is recorded in customer audit logs. Accessing these areas requires multi-factor authentication, and employees must connect to the Databricks VPN.",
1051
  "controlCategory": "Out-of-the-box",
1052
  "readableControlId": "DASF 40",
1053
- "severity": "medium",
1054
  "automationPlatforms": ["AZURE_DATABRICKS"],
1055
  "criteria": []
1056
  },
@@ -1090,7 +1090,7 @@
1090
  "description": "Webhooks in the MLflow Model Registry enable you to automate machine learning workflow by triggering actions in response to specific events. These webhooks facilitate seamless integrations, allowing for the automatic execution of various processes. For example, webhooks are used for: CI workflow trigger (Validate your model automatically when creating a new version), Team notifications (Send alerts through a messaging app when a model stage transition request is received), Model fairness evaluation (Invoke a workflow to assess model fairness and bias upon a production transition request), and Automated deployment (Trigger a deployment pipeline when a new tag is created on a model).",
1091
  "controlCategory": "Implementation",
1092
  "readableControlId": "DASF 44",
1093
- "severity": "medium",
1094
  "automationPlatforms": ["AZURE_DATABRICKS"],
1095
  "criteria": []
1096
  },
@@ -1100,7 +1100,7 @@
1100
  "description": "Model evaluation is a critical component of the machine learning lifecycle. It provides data scientists with the tools to measure, interpret and explain the performance of their models. MLflow plays a critical role in accelerating model development by offering insights into the reasons behind a model's performance and guiding improvements and iterations. MLflow offers many industry-standard native evaluation metrics for classical machine learning algorithms and LLMs, and also facilitates the use of custom evaluation metrics.",
1101
  "controlCategory": "Implementation",
1102
  "readableControlId": "DASF 45",
1103
- "severity": "medium",
1104
  "automationPlatforms": ["AZURE_DATABRICKS"],
1105
  "criteria": []
1106
  },
@@ -1110,7 +1110,7 @@
1110
  "description": "Mosaic AI Vector Search is a vector database that is built into the Databricks Data Intelligence Platform and integrated with its governance and productivity tools. A vector database is a database that is optimized to store and retrieve embeddings. Embeddings are mathematical representations of the semantic content of data, typically text or image data. Embeddings are usually generated by feature extraction models for text, image, audio or multi-modal data, and are a key component of many GenAI applications that depend on finding documents or images that are similar to each other. Examples are RAG systems, recommender systems, and image and video recognition. Databricks implements the following security controls to protect your data: Every customer request to Vector Search is logically isolated, authenticated and authorized, and Mosaic AI Vector Search encrypts all data at rest (AES-256) and in transit (TLS 1.2+).",
1111
  "controlCategory": "Implementation",
1112
  "readableControlId": "DASF 46",
1113
- "severity": "medium",
1114
  "automationPlatforms": ["AZURE_DATABRICKS"],
1115
  "criteria": []
1116
  },
@@ -1120,7 +1120,7 @@
1120
  "description": "New, no-code visual tools allow users to compare models' output based on set prompts, which are automatically tracked within MLflow. With integration into Mosaic AI Model Serving, customers can deploy the best model to production. The AI Playground is a chat-like environment where you can test, prompt and compare LLMs.",
1121
  "controlCategory": "Implementation",
1122
  "readableControlId": "DASF 47",
1123
- "severity": "medium",
1124
  "automationPlatforms": ["AZURE_DATABRICKS"],
1125
  "criteria": []
1126
  },
@@ -1170,7 +1170,7 @@
1170
  "description": "Databricks' Git Repository integration supports effective code and third-party libraries management, enhancing customer control over their development environment.",
1171
  "controlCategory": "Out-of-the-box",
1172
  "readableControlId": "DASF 52",
1173
- "severity": "medium",
1174
  "automationPlatforms": ["AZURE_DATABRICKS"],
1175
  "criteria": []
1176
  },
@@ -1180,7 +1180,7 @@
1180
  "description": "Databricks' library management system allows administrators to manage the installation and usage of third-party libraries effectively. This feature enhances the security and efficiency of systems, pipelines and data by giving administrators precise control over their development environment.",
1181
  "controlCategory": "Out-of-the-box",
1182
  "readableControlId": "DASF 53",
1183
- "severity": "medium",
1184
  "automationPlatforms": ["AZURE_DATABRICKS"],
1185
  "criteria": []
1186
  }
 
670
  "description": "Synchronizing users and groups from your identity provider (IdP) with Databricks using the SCIM standard facilitates consistent and automated user provisioning for enhancing security.",
671
  "controlCategory": "Configuration",
672
  "readableControlId": "DASF 2",
673
+ "severity": "low",
674
  "automationPlatforms": ["AZURE_DATABRICKS"],
675
  "criteria": []
676
  },
 
680
  "description": "Configure IP access lists to restrict authentication to Databricks from specific IP ranges, such as VPNs or office networks, and strengthen network security by preventing unauthorized access from untrusted locations.",
681
  "controlCategory": "Configuration",
682
  "readableControlId": "DASF 3",
683
+ "severity": "low",
684
  "automationPlatforms": ["AZURE_DATABRICKS"],
685
  "criteria": []
686
  },
 
690
  "description": "Use AWS PrivateLink, Azure Private Link or GCP Private Service Connect to create a private network route between the customer and the Databricks control plane or the control plane and the customer\u2019s compute plane environments to enhance data security by avoiding public internet exposure.",
691
  "controlCategory": "Configuration",
692
  "readableControlId": "DASF 4",
693
+ "severity": "high",
694
  "automationPlatforms": ["AZURE_DATABRICKS"],
695
  "criteria": []
696
  },
 
700
  "description": "Implementing Unity Catalog for unified permissions management and assets simplifies access control and enhances security.",
701
  "controlCategory": "Implementation",
702
  "readableControlId": "DASF 5",
703
+ "severity": "high",
704
  "automationPlatforms": ["AZURE_DATABRICKS"],
705
  "criteria": []
706
  },
 
710
  "description": "Tags are attributes containing keys and optional values that you can apply to different securable objects in Unity Catalog. Organizing securable objects with tags in Unity Catalog aids in efficient data management, data discovery and classification, essential for handling large datasets.",
711
  "controlCategory": "Implementation",
712
  "readableControlId": "DASF 6",
713
+ "severity": "high",
714
  "automationPlatforms": ["AZURE_DATABRICKS"],
715
  "criteria": []
716
  },
 
730
  "description": "Databricks supports customer-managed encryption keys to strengthen data at rest protection and greater access control.",
731
  "controlCategory": "Configuration",
732
  "readableControlId": "DASF 8",
733
+ "severity": "low",
734
  "automationPlatforms": ["AZURE_DATABRICKS"],
735
  "criteria": []
736
  },
 
740
  "description": "Databricks supports TLS 1.2+ encryption to protect customer data during transit. This applies to data transfer between the customer and the Databricks control plane and within the compute plane. Customers can also secure inter-cluster communications within the compute plane per their security requirements.",
741
  "controlCategory": "Out-of-the-box",
742
  "readableControlId": "DASF 9",
743
+ "severity": "low",
744
  "automationPlatforms": ["AZURE_DATABRICKS"],
745
  "criteria": []
746
  },
 
750
  "description": "Store data in a lakehouse architecture using Delta tables. Delta tables can be versioned to revert any user\u2019s or malicious actor\u2019s poisoning of data. Data can be stored in a lakehouse architecture in the customer\u2019s cloud account. Both raw data and feature tables are stored as Delta tables with access controls to determine who can read and modify them. Data lineage with UC helps track and audit changes and the origin of ML data sources. Each operation that modifies a Delta Lake table creates a new table version. User actions are tracked and audited, and lineage of transformations is available all in the same platform. You can use history information to audit operations, roll back a table or query a table at a specific point in time using time travel.",
751
  "controlCategory": "Implementation",
752
  "readableControlId": "DASF 10",
753
+ "severity": "low",
754
  "automationPlatforms": ["AZURE_DATABRICKS"],
755
  "criteria": []
756
  },
 
810
  "description": "Databricks Feature Store is a centralized repository that enables data scientists to find and share features and also ensures that the same code used to compute the feature values is used for model training and inference. Unity Catalog\u2019s capabilities, such as security, lineage, table history, tagging and cross-workspace access, are automatically available to the feature table to reduce the risk of malicious actors manipulating the features that feed into ML training.",
811
  "controlCategory": "Implementation",
812
  "readableControlId": "DASF 16",
813
+ "severity": "high",
814
  "automationPlatforms": ["AZURE_DATABRICKS"],
815
  "criteria": []
816
  },
 
820
  "description": "MLflow with Delta Lake tracks the training data used for ML model training. It also enables the identification of specific ML models and runs derived from particular datasets for regulatory and auditable attribution.",
821
  "controlCategory": "Configuration",
822
  "readableControlId": "DASF 17",
823
+ "severity": "high",
824
  "automationPlatforms": ["AZURE_DATABRICKS"],
825
  "criteria": []
826
  },
 
830
  "description": "With Unity Catalog, organizations can implement a unified governance framework for their structured and unstructured data, machine learning models, notebooks, features, functions, and files, enhancing security and compliance across clouds and platforms.",
831
  "controlCategory": "Configuration",
832
  "readableControlId": "DASF 18",
833
+ "severity": "high",
834
  "automationPlatforms": ["AZURE_DATABRICKS"],
835
  "criteria": []
836
  },
 
840
  "description": "Databricks includes a managed version of MLflow featuring enterprise security controls and high availability. It supports functionalities like experiments, run management and notebook revision capture. MLflow on Databricks allows tracking and measuring machine learning model training runs, logging model training artifacts and securing machine learning projects.",
841
  "controlCategory": "Implementation",
842
  "readableControlId": "DASF 19",
843
+ "severity": "high",
844
  "automationPlatforms": ["AZURE_DATABRICKS"],
845
  "criteria": []
846
  },
 
860
  "description": "Databricks Lakehouse Monitoring offers a single pane of glass to centrally track tables\u2019 data quality and statistical properties and automatically classifies data. It can also track the performance of machine learning models and model serving endpoints by monitoring inference tables containing model inputs and predictions through a single pane of glass.",
861
  "controlCategory": "Implementation",
862
  "readableControlId": "DASF 21",
863
+ "severity": "low",
864
  "automationPlatforms": ["AZURE_DATABRICKS"],
865
  "criteria": []
866
  },
 
900
  "description": "Generating relevant and accurate responses in large language models (LLMs) while avoiding hallucinations requires grounding them in domain-specific knowledge. Retrieval augmented generation (RAG) addresses this by breaking down extensive datasets into manageable segments (\u201cchunks\u201d) that are \u201cvector embedded.\u201d These vector embeddings are mathematical representations that help the model understand and quantify different data segments. As a result, LLMs produce responses that are contextually relevant and deeply rooted in the specific domain knowledge.",
901
  "controlCategory": "Implementation",
902
  "readableControlId": "DASF 25",
903
+ "severity": "low",
904
  "automationPlatforms": ["AZURE_DATABRICKS"],
905
  "criteria": []
906
  },
 
910
  "description": "Data is your competitive advantage. Use it to customize large AI models to beat your competition. Produce new model variants with tailored LLM response style and structure via fine-tuning. Fine-tune your own LLM with open models to own your IP.",
911
  "controlCategory": "Implementation",
912
  "readableControlId": "DASF 26",
913
+ "severity": "low",
914
  "automationPlatforms": ["AZURE_DATABRICKS"],
915
  "criteria": []
916
  },
 
960
  "description": "Model serving involves risks of unauthorized data access and model tampering, which can compromise the integrity and reliability of machine learning deployments. Mosaic AI Model Serving addresses these concerns by providing secure-by-default REST API endpoints for MLflow machine learning models, featuring autoscaling, high availability and low latency.",
961
  "controlCategory": "Out-of-the-box",
962
  "readableControlId": "DASF 31",
963
+ "severity": "high",
964
  "automationPlatforms": ["AZURE_DATABRICKS"],
965
  "criteria": []
966
  },
 
970
  "description": "External models are third-party models hosted outside of Databricks. Supported by Model Serving AI Gateway, Databricks external models via the AI Gateway allow you to streamline the usage and management of various large language model (LLM) providers, such as OpenAI and Anthropic, within an organization. You can also use Mosaic AI Model Serving as a provider to serve predictive ML models, which offers rate limits for those endpoints. As part of this support, Model Serving offers a high-level interface that simplifies the interaction with these services by providing a unified endpoint to handle specific LLM-related requests. In addition, Databricks support for external models provides centralized credential management. By storing API keys in one secure location, organizations can enhance their security posture by minimizing the exposure of sensitive API keys throughout the system. It also helps to prevent exposing these keys within code or requiring end users to manage keys safely.",
971
  "controlCategory": "Out-of-the-box",
972
  "readableControlId": "DASF 32",
973
+ "severity": "high",
974
  "automationPlatforms": ["AZURE_DATABRICKS"],
975
  "criteria": []
976
  },
 
1010
  "description": "Databricks SQL alerts can monitor the metrics table for security-based conditions, ensuring data integrity and timely response to potential issues: Statistic range Alert: Triggers when a specific statistic, such as the fraction of missing values, exceeds a predetermined threshold. Data distribution shift alert: Activates upon shifts in data distribution, as indicated by the drift metrics table. Baseline divergence alert: Alerts if data significantly diverges from a baseline, suggesting potential needs for data analysis or model retraining, particularly in InferenceLog analysis.",
1011
  "controlCategory": "Implementation",
1012
  "readableControlId": "DASF 36",
1013
+ "severity": "high",
1014
  "automationPlatforms": ["AZURE_DATABRICKS"],
1015
  "criteria": []
1016
  },
 
1040
  "description": "Databricks has established a formal incident response plan that outlines key elements such as roles, responsibilities, escalation paths and external communication protocols. The platform handles over 9TB of audit logs daily, aiding customer and Databricks security investigations. A dedicated security incident response team operates an internal Databricks instance, consolidating essential log sources for thorough security analysis. Databricks ensures continual operational readiness with a 24/7/365 on-call rotation. Additionally, a proactive hunting program and a specialized detection team support the incident response program.",
1041
  "controlCategory": "Out-of-the-box",
1042
  "readableControlId": "DASF 39",
1043
+ "severity": "low",
1044
  "automationPlatforms": ["AZURE_DATABRICKS"],
1045
  "criteria": []
1046
  },
 
1050
  "description": "Databricks personnel, by default, do not have access to customer workspaces or production environments. Access may be temporarily requested by Databricks staff for purposes such as investigating outages, security events or supporting deployments. Customers have the option to disable this access. Additionally, staff activity within these environments is recorded in customer audit logs. Accessing these areas requires multi-factor authentication, and employees must connect to the Databricks VPN.",
1051
  "controlCategory": "Out-of-the-box",
1052
  "readableControlId": "DASF 40",
1053
+ "severity": "low",
1054
  "automationPlatforms": ["AZURE_DATABRICKS"],
1055
  "criteria": []
1056
  },
 
1090
  "description": "Webhooks in the MLflow Model Registry enable you to automate machine learning workflow by triggering actions in response to specific events. These webhooks facilitate seamless integrations, allowing for the automatic execution of various processes. For example, webhooks are used for: CI workflow trigger (Validate your model automatically when creating a new version), Team notifications (Send alerts through a messaging app when a model stage transition request is received), Model fairness evaluation (Invoke a workflow to assess model fairness and bias upon a production transition request), and Automated deployment (Trigger a deployment pipeline when a new tag is created on a model).",
1091
  "controlCategory": "Implementation",
1092
  "readableControlId": "DASF 44",
1093
+ "severity": "high",
1094
  "automationPlatforms": ["AZURE_DATABRICKS"],
1095
  "criteria": []
1096
  },
 
1100
  "description": "Model evaluation is a critical component of the machine learning lifecycle. It provides data scientists with the tools to measure, interpret and explain the performance of their models. MLflow plays a critical role in accelerating model development by offering insights into the reasons behind a model's performance and guiding improvements and iterations. MLflow offers many industry-standard native evaluation metrics for classical machine learning algorithms and LLMs, and also facilitates the use of custom evaluation metrics.",
1101
  "controlCategory": "Implementation",
1102
  "readableControlId": "DASF 45",
1103
+ "severity": "high",
1104
  "automationPlatforms": ["AZURE_DATABRICKS"],
1105
  "criteria": []
1106
  },
 
1110
  "description": "Mosaic AI Vector Search is a vector database that is built into the Databricks Data Intelligence Platform and integrated with its governance and productivity tools. A vector database is a database that is optimized to store and retrieve embeddings. Embeddings are mathematical representations of the semantic content of data, typically text or image data. Embeddings are usually generated by feature extraction models for text, image, audio or multi-modal data, and are a key component of many GenAI applications that depend on finding documents or images that are similar to each other. Examples are RAG systems, recommender systems, and image and video recognition. Databricks implements the following security controls to protect your data: Every customer request to Vector Search is logically isolated, authenticated and authorized, and Mosaic AI Vector Search encrypts all data at rest (AES-256) and in transit (TLS 1.2+).",
1111
  "controlCategory": "Implementation",
1112
  "readableControlId": "DASF 46",
1113
+ "severity": "low",
1114
  "automationPlatforms": ["AZURE_DATABRICKS"],
1115
  "criteria": []
1116
  },
 
1120
  "description": "New, no-code visual tools allow users to compare models' output based on set prompts, which are automatically tracked within MLflow. With integration into Mosaic AI Model Serving, customers can deploy the best model to production. The AI Playground is a chat-like environment where you can test, prompt and compare LLMs.",
1121
  "controlCategory": "Implementation",
1122
  "readableControlId": "DASF 47",
1123
+ "severity": "high",
1124
  "automationPlatforms": ["AZURE_DATABRICKS"],
1125
  "criteria": []
1126
  },
 
1170
  "description": "Databricks' Git Repository integration supports effective code and third-party libraries management, enhancing customer control over their development environment.",
1171
  "controlCategory": "Out-of-the-box",
1172
  "readableControlId": "DASF 52",
1173
+ "severity": "high",
1174
  "automationPlatforms": ["AZURE_DATABRICKS"],
1175
  "criteria": []
1176
  },
 
1180
  "description": "Databricks' library management system allows administrators to manage the installation and usage of third-party libraries effectively. This feature enhances the security and efficiency of systems, pipelines and data by giving administrators precise control over their development environment.",
1181
  "controlCategory": "Out-of-the-box",
1182
  "readableControlId": "DASF 53",
1183
+ "severity": "high",
1184
  "automationPlatforms": ["AZURE_DATABRICKS"],
1185
  "criteria": []
1186
  }