# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------


import os
import unittest

from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer
from azure.cli.testsdk.scenario_tests import record_only, AllowLargeResponse

TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))


class HDInsightClusterTests(ScenarioTest):
    """Live scenario tests for the `az hdinsight` command group."""

    # Default Azure region used by the ResourceGroupPreparer/StorageAccountPreparer
    # decorators on the test methods below.
    location = 'EastAsia'
    # Pre-existing virtual network and subnet — presumably consumed by the
    # _vnet_arguments() helper (defined outside this view); TODO confirm.
    vnet_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/hdi-vn-0'
    subnet = 'default'

    # Uses 'rg' kwarg
    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='hdi-cli', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='cli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_min_args(self, storage_account_info):
        """Create a cluster with the minimal WASB arguments (no key, no container)."""
        storage_args = self._wasb_arguments(
            storage_account_info, specify_key=False, specify_container=False)
        self._create_hdinsight_cluster(storage_args, self._vnet_arguments())

    # Uses 'rg' kwarg
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_resize(self, storage_account_info):
        """Resize the worker role to two nodes and verify the new target count."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info), self._vnet_arguments())

        # scale the worker role out to 2 nodes
        self.cmd('az hdinsight resize -n {cluster} -g {rg} --workernode-count 2')

        self.cmd('az hdinsight show -n {cluster} -g {rg}', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
            self.check('properties.clusterState', 'Running'),
            self.check(
                "properties.computeProfile.roles[?name=='workernode'].targetInstanceCount", [2])
        ])

    # Uses 'rg' kwarg
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_kafka(self, storage_account_info):
        """Create a Kafka cluster inside the test virtual network."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._kafka_arguments(),
            self._vnet_arguments())

    # Uses 'rg' kwarg
    # _rest_proxy_arguments() will override location to southcentralus, so use this location for rg and sa
    @unittest.skip('https://github.com/Azure/azure-cli/issues/28860')
    @ResourceGroupPreparer(name_prefix='hdicli-', location='southcentralus', random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location='southcentralus', parameter_name='storage_account')
    def test_hdinsight_cluster_kafka_with_rest_proxy(self, storage_account_info):
        """Create a Kafka cluster with the REST proxy enabled (currently skipped)."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._kafka_arguments(),
            self._rest_proxy_arguments())

    # Uses 'rg' kwarg
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_kafka_with_optional_disk_args(self, storage_account_info):
        """Create a Kafka cluster with the optional data-disk arguments supplied."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._kafka_arguments(),
            self._optional_data_disk_arguments(),
            self._vnet_arguments())

    # Uses 'rg' kwarg
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_with_component_version(self, storage_account_info):
        """Create a cluster with an explicit component version."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._component_version_arguments())

    # Uses 'rg' kwarg
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_with_cluster_config(self, storage_account_info):
        """Create a cluster with a custom cluster configuration."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._with_cluster_config(),
            self._vnet_arguments())

    # Uses 'rg' kwarg
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_with_ssh_creds(self, storage_account_info):
        """Create a cluster with explicit SSH username/password credentials."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._with_explicit_ssh_creds(),
            self._vnet_arguments())

    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_with_minimal_tls_version(self, storage_account_info):
        """Create a cluster pinned to TLS 1.2 and verify the setting took effect."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._with_minimal_tls_version('1.2'),
            self._vnet_arguments())

        expected = [
            self.check('properties.minSupportedTlsVersion', '1.2'),
            self.check('properties.clusterState', 'Running'),
        ]
        self.cmd('az hdinsight show -n {cluster} -g {rg}', checks=expected)

    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_with_encryption_in_transit(self, storage_account_info):
        """Create a cluster with encryption in transit and verify it is enabled."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._with_encryption_in_transit(),
            self._vnet_arguments())

        expected = [
            self.check('properties.encryptionInTransitProperties.isEncryptionInTransitEnabled', True),
            self.check('properties.clusterState', 'Running'),
        ]
        self.cmd('az hdinsight show -n {cluster} -g {rg}', checks=expected)

    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_with_loadbased_autoscale(self, storage_account_info):
        """Create a cluster with load-based autoscale and verify the min node count."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._with_load_based_autoscale(),
            self._vnet_arguments())

        min_count_query = ("properties.computeProfile.roles[?name=='workernode']"
                           ".autoscaleConfiguration.capacity.minInstanceCount")
        self.cmd('az hdinsight show -n {cluster} -g {rg}', checks=[
            self.check('properties.clusterState', 'Running'),
            self.check(min_count_query, [4]),
        ])

    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_with_schedulebased_autoscale(self, storage_account_info):
        """Create a cluster with schedule-based autoscale and verify the schedule."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._with_schedule_based_autoscale(),
            self._vnet_arguments())

        schedule_query = ("properties.computeProfile.roles[?name=='workernode']"
                          ".autoscaleConfiguration.recurrence."
                          "schedule[0].timeAndCapacity.minInstanceCount")
        self.cmd('az hdinsight show -n {cluster} -g {rg}', checks=[
            self.check('properties.clusterState', 'Running'),
            self.check(schedule_query, [5]),
        ])

    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_with_encryption_at_host(self, storage_account_info):
        """Create a cluster with host-level disk encryption and verify it is on."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._with_encryption_at_host(),
            self._vnet_arguments())

        expected = [
            self.check('properties.diskEncryptionProperties.encryptionAtHost', True),
            self.check('properties.clusterState', 'Running'),
        ]
        self.cmd('az hdinsight show -n {cluster} -g {rg}', checks=expected)

    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_with_private_link_configurations(self, storage_account_info):
        """Create an outbound private-link cluster from a JSON configuration file."""
        config_path = os.path.join(TEST_DIR, 'privatelinkconfigurations.json')
        self.kwargs['private_link_config_file_path'] = config_path

        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._with_private_link_configurations(config_path))

        self.cmd('az hdinsight show -n {cluster} -g {rg}', checks=[
            self.check('properties.networkProperties.privateLink', "Enabled"),
            self.check('properties.networkProperties.resourceProviderConnection', 'Outbound'),
            self.check('type(properties.privateLinkConfigurations)', 'array'),
            self.check('length(properties.privateLinkConfigurations)', 1),
        ])

    @AllowLargeResponse()
    @unittest.skip("No suitable SKU")
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_with_compute_isolation(self, storage_account_info):
        """Create a compute-isolated cluster (currently skipped: no suitable SKU)."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._with_compute_isolation(),
            self._vnet_arguments())
        self.cmd('az hdinsight show -n {cluster} -g {rg}', checks=[
            self.check('properties.computeIsolationProperties.enableComputeIsolation', True)
        ])

    @unittest.skip("Skip this case this time")
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_cluster_with_availability_zones(self, storage_account_info):
        """Create a zoned cluster with custom metastores (currently skipped)."""
        metastore_path = os.path.join(TEST_DIR, 'customallmetastores.json')
        self.kwargs['custom_all_meta_stores_file_path'] = metastore_path

        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._with_availability_zones(metastore_path))

        self.cmd('az hdinsight show -n {cluster} -g {rg}', checks=[
            self.check('type(zones)', 'array'),
            self.check('length(zones)', 1),
        ])

    # Uses 'rg' kwarg

    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_application(self, storage_account_info):
        """Create an HDInsight application, then list and inspect it by name."""
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._with_explicit_ssh_creds(),
            self._vnet_arguments())

        # application parameters: name, install script and its arguments
        self.kwargs['app'] = self.create_random_name(prefix='hdicliapp-', length=16)
        self.kwargs['script_uri'] = 'https://hdiconfigactions.blob.core.windows.net/linuxhueconfigactionv02/install-hue-uber-v02.sh'
        self.kwargs['script_action'] = 'InstallHue'
        self.kwargs['script_params'] = '"-version latest -port 20000"'

        # create the application and block until provisioning finishes
        self.cmd('az hdinsight application create -g {rg} -n {app} --cluster-name {cluster} '
                 '--script-uri {script_uri} --script-action-name {script_action} --script-parameters {script_params}')
        self.cmd('az hdinsight application wait --created -n {app} -g {rg} --cluster-name {cluster}')

        # the cluster should now expose exactly one application
        self.cmd('az hdinsight application list -g {rg} --cluster-name {cluster}', checks=[
            self.check('type(@)', 'array'),
            self.check('length(@)', 1)
        ])

        # fetch it by name and verify its state
        self.cmd('az hdinsight application show -g {rg} -n {app} --cluster-name {cluster}', checks=[
            self.check('name', '{app}'),
            self.check('properties.provisioningState', 'Succeeded'),
            self.check('properties.applicationState', 'Running')
        ])

    # Uses 'rg' kwarg
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_usage(self, storage_account_info):
        """List regional usage/quota information for HDInsight."""
        self.kwargs['loc'] = self.location

        self.cmd('az hdinsight list-usage -l {loc}', checks=[
            self.check('type(value)', 'array'),
            self.check('length(value)', 12)
        ])

    # Uses 'rg' kwarg
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_monitor(self, storage_account_info):
        """Enable, verify and disable classic Log Analytics monitoring."""
        self.kwargs['ws'] = self.create_random_name('testws', 20)
        self.kwargs['la_prop_path'] = os.path.join(TEST_DIR, 'loganalytics.json')

        # create the Log Analytics workspace the cluster will report to
        workspace = self.cmd('resource create -g {rg} -n {ws} '
                             '--resource-type Microsoft.OperationalInsights/workspaces -p @"{la_prop_path}"') \
            .get_output_in_json()
        customer_id = workspace['properties']['customerId']

        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._with_explicit_ssh_creds(),
            self._vnet_arguments())

        # monitoring starts out disabled
        self.cmd('az hdinsight monitor show -g {rg} -n {cluster}', checks=[
            self.check('clusterMonitoringEnabled', False),
            self.check('workspaceId', None)
        ])

        # enable it against the new workspace and confirm the workspace id
        self.cmd('az hdinsight monitor enable -g {rg} -n {cluster} --workspace {ws} --no-validation-timeout')
        self.cmd('az hdinsight monitor show -g {rg} -n {cluster}', checks=[
            self.check('clusterMonitoringEnabled', True),
            self.check('workspaceId', customer_id)
        ])

        # and turn it back off
        self.cmd('az hdinsight monitor disable -g {rg} -n {cluster}')
        self.cmd('az hdinsight monitor show -g {rg} -n {cluster}', checks=[
            self.check('clusterMonitoringEnabled', False),
            self.check('workspaceId', None)
        ])

    # Uses 'rg' kwarg
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_azure_monitor(self, storage_account_info):
        """Enable, verify and disable Azure Monitor integration."""
        self.kwargs['ws'] = self.create_random_name('testws', 20)
        self.kwargs['la_prop_path'] = os.path.join(TEST_DIR, 'loganalytics.json')

        # create the Log Analytics workspace the cluster will report to
        workspace = self.cmd('resource create -g {rg} -n {ws} '
                             '--resource-type Microsoft.OperationalInsights/workspaces -p @"{la_prop_path}"') \
            .get_output_in_json()
        customer_id = workspace['properties']['customerId']

        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._with_explicit_ssh_creds(),
            self._vnet_arguments())

        # Azure Monitor starts out disabled
        self.cmd('az hdinsight azure-monitor show -g {rg} -n {cluster}', checks=[
            self.check('clusterMonitoringEnabled', False),
            self.check('workspaceId', None)
        ])

        # enable it against the new workspace and confirm the workspace id
        self.cmd('az hdinsight azure-monitor enable -g {rg} -n {cluster} --workspace {ws} --no-validation-timeout')
        self.cmd('az hdinsight azure-monitor show -g {rg} -n {cluster}', checks=[
            self.check('clusterMonitoringEnabled', True),
            self.check('workspaceId', customer_id)
        ])

        # and turn it back off
        self.cmd('az hdinsight azure-monitor disable -g {rg} -n {cluster}')
        self.cmd('az hdinsight azure-monitor show -g {rg} -n {cluster}', checks=[
            self.check('clusterMonitoringEnabled', False),
            self.check('workspaceId', None)
        ])

    # Uses 'rg' kwarg
    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_script_action(self, storage_account_info):
        """End-to-end coverage of script actions.

        Covers execute (persisted and non-persisted), list, delete,
        execution history, show-execution-details and promote.
        """
        self.kwargs.update({
            'script_uri': 'https://hdiconfigactions.blob.core.windows.net/linuxgiraphconfigactionv01/giraph-installer-v01.sh',
            'script_action': 'InstallGiraph',
            'script_action_1': 'InstallGiraph1',
            'head_node': 'headnode',
            'worker_node': 'workernode'
        })

        self._create_hdinsight_cluster(
            HDInsightClusterTests._wasb_arguments(storage_account_info),
            HDInsightClusterTests._with_explicit_ssh_creds(),
            HDInsightClusterTests._vnet_arguments()
        )

        # execute script actions, and persist on success.
        self.cmd('az hdinsight script-action execute -g {rg} -n {script_action} '
                 '--cluster-name {cluster} --script-uri {script_uri} --roles {head_node} {worker_node} --persist-on-success')

        # list script actions and validate script is persisted.
        roles = [self.kwargs['head_node'], self.kwargs['worker_node']]
        self.cmd('az hdinsight script-action list -g {rg} --cluster-name {cluster}', checks=[
            self.check('type(@)', 'array'),
            self.check('length(@)', 1),
            self.check('[0].name', '{script_action}'),
            self.check('[0].uri', '{script_uri}'),
            self.check('[0].roles', roles)
        ])

        # delete script action.
        self.cmd('az hdinsight script-action delete -g {rg} -n {script_action} --cluster-name {cluster}')

        # list script actions and validate script is deleted.
        self.cmd('az hdinsight script-action list -g {rg} --cluster-name {cluster}', checks=[
            self.check('type(@)', 'array'),
            self.check('length(@)', 0)
        ])

        # list script action history and validate script appears there.
        script_actions = self.cmd('az hdinsight script-action list-execution-history -g {rg} --cluster-name {cluster}',
                                  checks=[
                                      self.check('type(@)', 'array'),
                                      self.check('length(@)', 1),
                                      self.check('[0].name', '{script_action}'),
                                      self.check('[0].uri', '{script_uri}'),
                                      self.check('[0].roles', roles),
                                      self.check('[0].status', 'Succeeded')
                                  ]).get_output_in_json()

        # get the script action by ID and validate it's the same action.
        # (result not needed beyond the checks, so it is not kept)
        self.kwargs['script_execution_id'] = str(script_actions[0]['scriptExecutionId'])
        self.cmd('az hdinsight script-action show-execution-details -g {rg} --cluster-name {cluster} '
                 '--execution-id {script_execution_id}',
                 checks=[
                     self.check('name', '{script_action}')
                 ])

        # execute script actions, but don't persist on success.
        self.cmd('az hdinsight script-action execute -g {rg} --cluster-name {cluster} '
                 '--name {script_action_1} --script-uri {script_uri} --roles {head_node} {worker_node}')

        # list script action history and validate the new script also appears.
        script_actions = self.cmd('az hdinsight script-action list-execution-history -g {rg} --cluster-name {cluster}',
                                  checks=[
                                      self.check('type(@)', 'array'),
                                      self.check('length(@)', 2),
                                      self.check('[0].name', '{script_action_1}'),
                                      self.check("[0].uri", '{script_uri}'),
                                      self.check("[0].status", 'Succeeded')
                                  ]).get_output_in_json()

        # promote non-persisted script.
        self.kwargs['script_execution_id'] = str(script_actions[0]['scriptExecutionId'])
        self.cmd('az hdinsight script-action promote -g {rg} --cluster-name {cluster} '
                 '--execution-id {script_execution_id}')

        # list script action list and validate the promoted script is the only one there.
        self.cmd('az hdinsight script-action list -g {rg} --cluster-name {cluster}', checks=[
            self.check('type(@)', 'array'),
            self.check('length(@)', 1),
            self.check('[0].name', '{script_action_1}'),
            self.check('[0].uri', '{script_uri}'),
            self.check('[0].roles', roles),
            self.check('[0].status', None)
        ])

        # list script action history and validate both scripts are there.
        self.cmd('az hdinsight script-action list-execution-history -g {rg} --cluster-name {cluster}',
                 checks=[
                     self.check('type(@)', 'array'),
                     self.check('length(@)', 2),
                     self.check('[0].name', '{script_action_1}'),
                     self.check("[0].uri", '{script_uri}'),
                     self.check("[0].roles", roles),
                     self.check("[0].status", 'Succeeded'),
                     self.check('[1].name', '{script_action}'),
                     self.check("[1].uri", '{script_uri}'),
                     self.check("[1].roles", roles),
                     self.check("[1].status", 'Succeeded')
                 ])

    # @record_only()
    # @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    # @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    # def test_hdinsight_virtual_machine(self, storage_account_info):
    #     self._create_hdinsight_cluster(
    #         HDInsightClusterTests._wasb_arguments(storage_account_info)
    #     )
    #
    #     # list hosts of the cluster
    #     host_list = self.cmd('az hdinsight host list --resource-group {rg} --cluster-name {cluster}', checks=[
    #         self.check('type(@)', 'array'),
    #         self.exists('[0].name')
    #     ]).get_output_in_json()
    #
    #     target_host = host_list[0]['name']
    #     for host in host_list:
    #         if host['name'].startswith('wn'):
    #             target_host = host['name']
    #             break
    #     self.kwargs['target_host'] = target_host
    #     # restart host of the cluster
    #     self.cmd(
    #         'az hdinsight host restart --resource-group {rg} --cluster-name {cluster} --host-names {target_host} --yes')

    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_autoscale_operation(self, storage_account_info):
        """Exercise load-based and schedule-based autoscale CRUD operations."""

        def wait_until_running():
            # Re-check cluster state between operations to keep the test robust.
            HDInsightClusterTests.wait_for_hdinsight_cluster_running(self)

        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._vnet_arguments())

        # enable load-based autoscale
        wait_until_running()
        self.cmd(
            'az hdinsight autoscale create --cluster-name {cluster} --resource-group {rg} --type Load '
            '--min-workernode-count 4 --max-workernode-count 5 --yes')
        self.cmd('az hdinsight show --name {cluster} --resource-group {rg}', checks=[
            self.exists("properties.computeProfile.roles[?name=='workernode'].autoscaleConfiguration"),
            self.check(
                "properties.computeProfile.roles[?name=='workernode'].autoscaleConfiguration.capacity.minInstanceCount",
                [4])
        ])
        wait_until_running()
        # update load-based autoscale
        self.cmd(
            'az hdinsight autoscale update --cluster-name {cluster} --resource-group {rg} --min-workernode-count 3')
        self.cmd('az hdinsight show --name {cluster} --resource-group {rg}', checks=[
            self.exists("properties.computeProfile.roles[?name=='workernode'].autoscaleConfiguration"),
            self.check(
                "properties.computeProfile.roles[?name=='workernode'].autoscaleConfiguration.capacity.minInstanceCount",
                [3])
        ])
        wait_until_running()
        # show autoscale configuration
        self.cmd('az hdinsight autoscale show --cluster-name {cluster} --resource-group {rg}', checks=[
            self.check("capacity.minInstanceCount", 3)
        ])
        wait_until_running()
        # disable autoscale
        self.cmd('az hdinsight autoscale delete --cluster-name {cluster} --resource-group {rg} --yes')
        self.cmd('az hdinsight show --name {cluster} --resource-group {rg}')
        wait_until_running()
        # enable schedule-based autoscale
        self.cmd(
            'az hdinsight autoscale create --cluster-name {cluster} --resource-group {rg} --type Schedule --timezone '
            '"China Standard Time" --days Monday --time 09:00 --workernode-count 4 --yes')
        wait_until_running()
        self.cmd('az hdinsight autoscale show --cluster-name {cluster} --resource-group {rg}', checks=[
            self.check("recurrence.schedule[0].days", ["Monday"])
        ])
        wait_until_running()
        # add a new schedule condition
        self.cmd(
            'az hdinsight autoscale condition create --cluster-name {cluster} --resource-group {rg} --days Tuesday '
            '--time 08:00 --workernode-count 5')
        self.cmd('az hdinsight autoscale show --cluster-name {cluster} --resource-group {rg}', checks=[
            self.check("recurrence.schedule[1].days", ["Tuesday"])
        ])
        wait_until_running()
        # update the new condition's node count
        self.cmd(
            'az hdinsight autoscale condition update --cluster-name {cluster} --resource-group {rg} '
            '--index 1 --workernode-count 4')
        self.cmd('az hdinsight autoscale show --cluster-name {cluster} --resource-group {rg}', checks=[
            self.check("recurrence.schedule[1].timeAndCapacity.minInstanceCount", 4)
        ])
        # list schedule conditions
        self.cmd('az hdinsight autoscale condition list --cluster-name {cluster} --resource-group {rg}', checks=[
            self.check('length(@)', 2)
        ])
        # delete schedule condition
        self.cmd(
            'az hdinsight autoscale condition delete --cluster-name {cluster} --resource-group {rg} --index 1 --yes')
        self.cmd('az hdinsight autoscale condition list --cluster-name {cluster} --resource-group {rg}', checks=[
            self.check('length(@)', 1)
        ])

    def test_hdinsight_azure_monitor_agent(self):
        """Enable, show and disable the Azure Monitor Agent on a pre-existing cluster."""
        # fetch the workspace primary shared key before populating the kwargs
        primary_key = self.cmd(
            'az monitor log-analytics workspace get-shared-keys --resource-group cli-test-rg '
            '--workspace-name ps-la --query primarySharedKey -o tsv').output.strip()
        self.kwargs.update({
            'loc': self.location,
            'cluster': 'cli-test-cluster',
            'rg': 'cli-test-rg',
            'workspace_id': '/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli-test-rg/providers/microsoft.operationalinsights/workspaces/ps-la',
            'workspace_key': primary_key,
        })

        # enable azure monitor agent
        self.cmd(
            'az hdinsight azure-monitor-agent enable -n {cluster} --resource-group {rg} --workspace {workspace_id} --primary-key {workspace_key} ')
        # show azure monitor agent
        self.cmd('az hdinsight azure-monitor-agent show -n {cluster} --resource-group {rg}')
        # disable azure monitor agent
        self.cmd('az hdinsight azure-monitor-agent disable -n {cluster} --resource-group {rg}')

    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_update_user_assigned(self, storage_account_info):
        """Switch the cluster's managed identity to a user-assigned MSI."""
        self.kwargs['msi1'] = ('/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg'
                               '/providers/Microsoft.ManagedIdentity/userAssignedIdentities/msi')
        self._create_hdinsight_cluster(
            self._wasb_arguments(storage_account_info),
            self._vnet_arguments())

        # Update manage identity with a UserAssigned msi.
        self.cmd(
            'az hdinsight update --name {cluster} --resource-group {rg} --assign-identity-type UserAssigned --assign-identity {msi1}',
            checks=[
                self.check('identity.type', 'UserAssigned'),
            ])


    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_update_system_assigned(self, storage_account_info):
        """Create a cluster, then switch its managed identity to
        SystemAssigned and check the resulting identity.type."""
        self._create_hdinsight_cluster(self._wasb_arguments(storage_account_info),
                                       self._vnet_arguments())

        # Switch the cluster's managed identity to SystemAssigned.
        self.cmd(
            'az hdinsight update --name {cluster} --resource-group {rg} '
            '--assign-identity-type SystemAssigned',
            checks=[self.check('identity.type', 'SystemAssigned')])


    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    @StorageAccountPreparer(name_prefix='hdicli', location=location, parameter_name='storage_account')
    def test_hdinsight_update_system_and_user_assigned(self, storage_account_info):
        """Create a cluster, then assign both a system identity and two
        user-assigned identities, checking the combined identity.type."""
        msi_base = ('/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg'
                    '/providers/Microsoft.ManagedIdentity/userAssignedIdentities/')
        self.kwargs.update({'msi1': msi_base + 'msi', 'msi2': msi_base + 'msi2'})
        self._create_hdinsight_cluster(self._wasb_arguments(storage_account_info),
                                       self._vnet_arguments())

        # Combined SystemAssigned + UserAssigned identity update.
        self.cmd(
            'az hdinsight update --name {cluster} --resource-group {rg} --assign-identity-type "SystemAssigned,UserAssigned" --assign-identity {msi1} {msi2}',
            checks=[self.check('identity.type', 'SystemAssigned,UserAssigned')])

    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    def test_hdinsight_create_with_wasb_and_msi(self):
        """Create a cluster with WASB storage accessed through a
        user-assigned managed identity (no storage key supplied).

        NOTE(review): the storage account name and MSI resource id are
        hard-coded — presumably a recorded scenario; confirm before live runs.
        """
        account_info = ('hdi-storage-wasb', '')
        identity = ('/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg'
                    '/providers/Microsoft.ManagedIdentity/userAssignedIdentities/msi')
        self._create_hdinsight_cluster(self._wasb_arguments(account_info, msi=identity),
                                       self._vnet_arguments())

    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    def test_hdinsight_create_with_ADLSGen2_and_msi(self):
        """Create a cluster on ADLS Gen2 storage accessed through a
        user-assigned managed identity, without naming a filesystem.

        NOTE(review): the storage account name and MSI resource id are
        hard-coded — presumably a recorded scenario; confirm before live runs.
        """
        account_name = 'hdi-storage-adlsgen2'
        identity = ('/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg'
                    '/providers/Microsoft.ManagedIdentity/userAssignedIdentities/msi')
        self._create_hdinsight_cluster(
            self._adlsgen2_arguments(account_name, specify_filesystem=False, msi=identity),
            self._vnet_arguments())

    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='hdicli-', location=location, random_name_length=12)
    def test_hdinsight_create_with_entra_user(self):
        """Create a cluster with an Entra user as the gateway credential,
        then update credentials via a JSON config file and via UPN list."""
        account_info = ('hdicli000002', '')
        identity = ('/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg'
                    '/providers/Microsoft.ManagedIdentity/userAssignedIdentities/msi')
        entra_user = '"00000000-0000-0000-0000-000000000000","test@example.com"'
        self._create_hdinsight_cluster(
            self._wasb_arguments(account_info, msi=identity),
            self._entra_arguments(entra_user=entra_user),
            self._vnet_arguments())

        self.kwargs.update({
            'config_path': os.path.join(TEST_DIR, 'entrauserconfig.json'),
            'upn1': 'test1@example.com',
            'upn2': 'test2@example.com'
        })
        # Update Entra credentials from a JSON config file, then from UPNs.
        self.cmd('az hdinsight credentials update --name {cluster} --resource-group {rg} --entra-uinfo @"{config_path}" --yes')
        self.cmd('az hdinsight credentials update --name {cluster} --resource-group {rg} --entra-uid {upn1} {upn2} --yes')
        self.cmd('az hdinsight credentials show -n {cluster} --resource-group {rg}')

    @AllowLargeResponse()
    def test_hdinsight_credentials_update(self):
        """Wait for the gateway credentials resource to exist, then reset
        the cluster's HTTP password.

        NOTE(review): cluster/resource-group names are hard-coded —
        presumably a recorded scenario; confirm before running live.
        """
        self.kwargs.update({
            'cluster': 'cli-test-cluster',
            'rg': 'cli-test-rg',
            'http_password': 'Password1!'
        })
        self.cmd('az hdinsight credentials wait --name {cluster} --resource-group {rg} --exists')
        self.cmd('az hdinsight credentials update --name {cluster} --resource-group {rg} --http-password {http_password} --yes')

    def _create_hdinsight_cluster(self, *additional_create_arguments):
        """Create a spark cluster in the prepared resource group and assert
        it reaches provisioningState 'Succeeded' / clusterState 'Running'.

        *additional_create_arguments* are extra CLI argument strings
        (storage, vnet, autoscale, ...) appended verbatim to the command.
        """
        self.kwargs.update({
            'loc': self.location,
            'cluster': self.create_random_name(prefix='cli-', length=16),
            'http_password': 'Password1!',
            'cluster_type': 'spark',
        })

        base_cmd = ('az hdinsight create -n {cluster} -g {rg} -l {loc} -p {http_password} '
                    '-t {cluster_type} --no-validation-timeout ')
        create_cmd = base_cmd + ' '.join(additional_create_arguments)

        # Wait some time to improve robustness
        if self.is_live or self.in_recording:
            import time
            time.sleep(60)

        self.cmd(create_cmd, checks=[
            self.check('properties.provisioningState', 'Succeeded'),
            self.check('properties.clusterState', 'Running'),
            self.check("properties.computeProfile.roles[?name=='headnode']"
                       ".osProfile.linuxOperatingSystemProfile.username", ['sshuser'])
        ])

        self.cmd('az hdinsight show -n {cluster} -g {rg}', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
            self.check('properties.clusterState', 'Running')
        ])

    @staticmethod
    def _wasb_arguments(storage_account_info, specify_key=False, specify_container=True, msi = None):
        storage_account_name, storage_account_key = storage_account_info
        storage_account_key = storage_account_key.strip()

        key_args = ' --storage-account-key "{}"'.format(storage_account_key) if specify_key else ""
        container_args = ' --storage-container {}'.format('default') if specify_container else ""
        msi_args = ' --storage-account-managed-identity "{}"'.format(msi) if msi else ""
        return '--storage-account {}{}{}{}' \
            .format(storage_account_name, key_args, container_args, msi_args)

    @staticmethod
    def _adlsgen2_arguments(storage_account_info,  specify_filesystem=True, msi = None):
        storage_account_name = storage_account_info
        filesystem_args = ' --storage-filesystem {}'.format('default') if specify_filesystem else ""
        msi_args = ' --storage-account-managed-identity "{}"'.format(msi) if msi else ""
        return '--storage-account {}{}{}' \
            .format(storage_account_name, filesystem_args, msi_args)

    @staticmethod
    def _kafka_arguments():
        return '-t {} --workernode-data-disks-per-node {}'.format('kafka', '4')

    @staticmethod
    def _vnet_arguments():
        """Virtual-network + HDI version arguments shared by create tests;
        reads the class-level vnet_id/subnet constants."""
        cls = HDInsightClusterTests
        return f'--vnet-name {cls.vnet_id} --subnet {cls.subnet} --version 5.1'

    @staticmethod
    def _rest_proxy_arguments():
        return '--kafka-management-node-size {} --kafka-client-group-id {} --kafka-client-group-name {} -v 4.0 ' \
               '--component-version {} --location {}' \
            .format('Standard_D4_v2', '7bef90fa-0aa3-4bb4-b4d2-2ae7c14cfe41', 'KafakaRestProperties', 'kafka=2.1',
                    '"South Central US"')

    @staticmethod
    def _optional_data_disk_arguments():
        return '--workernode-data-disk-storage-account-type {} --workernode-data-disk-size {}' \
            .format('Standard_LRS', '1023')

    @staticmethod
    def _component_version_arguments():
        return '-t {} --component-version {} --version {}'.format('spark', 'spark=3.3', '5.1')

    @staticmethod
    def _entra_arguments(entra_user = None,entra_full_info = None):
        if entra_user:
            return '--entra-uid {}'.format(entra_user)
        else :
            return '--entra-uinfo {}'.format(entra_full_info)

    @staticmethod
    def _with_cluster_config():
        return '--cluster-configurations {}'.format(r'{{\"gateway\":{{\"restAuthCredential.username\":\"admin\"}}}}')

    @staticmethod
    def _with_explicit_ssh_creds():
        return '--ssh-user {} --ssh-password {}'.format('sshuser', 'Password1!')

    @staticmethod
    def _with_minimal_tls_version(tls_version):
        return '--minimal-tls-version {}'.format(tls_version)

    @staticmethod
    def _with_encryption_in_transit():
        return '--encryption-in-transit true'

    @staticmethod
    def _with_virtual_netowrk_profile(subnet_name):
        return '--subnet {}'.format(subnet_name)

    @staticmethod
    def _with_load_based_autoscale():
        return '--version 4.0 --autoscale-type Load --autoscale-min-workernode-count 4 --autoscale-max-workernode-count 5'

    @staticmethod
    def _with_schedule_based_autoscale():
        return '--version 4.0 --autoscale-type Schedule --timezone "China Standard Time" --days Monday --time "09:00"' \
               ' --autoscale-workernode-count 5'

    @staticmethod
    def _with_encryption_at_host():
        return '--workernode-size standard_ds12_v2 --headnode-size standard_ds12_v2 ' \
               '--zookeepernode-size standard_ds12_v2 --encryption-at-host true'

    @staticmethod
    def _with_private_link_configurations(private_link_configuration_file):
        return '--version 5.1 -l eastasia ' \
               '--subnet /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/hdi-vn-0/subnets/default '\
               '--vnet-name /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/hdi-vn-0 --subnet default ' \
               '--resource-provider-connection Outbound --public-ip-tag-type FirstPartyUsage --public-ip-tag-value HDInsight --outbound-dependencies-managed-type External '\
               '--enable-private-link --private-link-configurations @"{}" '\
               .format(private_link_configuration_file)

    @staticmethod
    def _with_availability_zones(custome_all_metastores_file):
        return '--version 4.0 -l southcentralus ' \
               '--subnet /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/vnetforcreatcluster/subnets/default ' \
               '--zones 1 --cluster-configurations @"{}"'\
               .format(custome_all_metastores_file)

    @staticmethod
    def _with_compute_isolation():
        return '--version 3.6 -l eastus ' \
               '--enable-compute-isolation --host-sku ESv3-Type2 ' \
               '--workernode-size Standard_E8S_V3 --headnode-size Standard_E8S_V3'
    
    def wait_for_hdinsight_cluster_running(self, cluster_name=None, resource_group=None, timeout=3000, interval=60):
        """Poll `az hdinsight show` until the cluster reports clusterState
        'Running', or raise after roughly `timeout` seconds.

        Bug fix: this method was decorated ``@staticmethod`` while declaring
        ``self`` and reading ``self.kwargs``/``self.cmd``. Calling it on an
        instance would not bind the instance, shifting every argument by
        one (the instance would land in ``cluster_name``) and then failing.
        The decorator is removed so it behaves as the instance method its
        body requires. The final-iteration sleep is also skipped so the
        method fails promptly after the last check.

        :param cluster_name: cluster to poll; defaults to self.kwargs['cluster'].
        :param resource_group: resource group; defaults to self.kwargs['rg'].
        :param timeout: total seconds to wait (approximate, in `interval` steps).
        :param interval: seconds between polls.
        :raises Exception: if the cluster never reaches 'Running'.
        """
        import time
        if cluster_name is None:
            cluster_name = self.kwargs['cluster']
        if resource_group is None:
            resource_group = self.kwargs['rg']
        attempts = max(1, timeout // interval)  # guard: interval > timeout still polls once
        for attempt in range(attempts):
            result = self.cmd(
                f'az hdinsight show --name {cluster_name} --resource-group {resource_group}'
            ).get_output_in_json()
            if result["properties"]["clusterState"] == "Running":
                return
            if attempt < attempts - 1:  # no pointless sleep after the last check
                time.sleep(interval)
        raise Exception(f"Cluster {cluster_name} did not reach 'Running' state within {timeout} seconds.")
