from collections.abc import Generator

from assertpy import assert_that
from kubernetes.client.models import V1Pod
import pytest

from utils.client import KubeClient
from utils.kubelet import update_kubelet


@pytest.fixture(scope="function")
def hls_pod(kube_client: KubeClient) -> Generator[V1Pod, None, None]:
    """Install an HLS sample pod for one test; delete it on teardown."""
    created = kube_client.install_hls_pod()
    assert_that(created).is_not_none()
    yield created
    # Teardown runs after the test regardless of its outcome.
    kube_client.delete_pod(created.metadata.name, created.metadata.namespace)


@pytest.fixture(scope="function")
def ls_pod(kube_client: KubeClient) -> Generator[V1Pod, None, None]:
    """Install an LS sample pod for one test; delete it on teardown."""
    created = kube_client.install_ls_pod()
    assert_that(created).is_not_none()
    yield created
    # Teardown runs after the test regardless of its outcome.
    kube_client.delete_pod(created.metadata.name, created.metadata.namespace)


@pytest.fixture(scope="function")
def be_pod(kube_client: KubeClient) -> Generator[V1Pod, None, None]:
    """Install a BE sample pod for one test; delete it on teardown."""
    created = kube_client.install_be_pod()
    assert_that(created).is_not_none()
    yield created
    # Teardown runs after the test regardless of its outcome.
    kube_client.delete_pod(created.metadata.name, created.metadata.namespace)


@pytest.fixture(scope='class')
def check_no_volcano(kube_client: KubeClient):
    """Precondition guard: assert that no volcano pod is running in volcano-system."""
    pod_items = kube_client.core_v1.list_namespaced_pod(namespace="volcano-system").items
    names = [item.metadata.name for item in pod_items]
    found_volcano = any('volcano' in name for name in names)
    assert_that(found_volcano).is_false()



def get_rubik_log(kube_client: KubeClient) -> str:
    """Return the log of the colocation-rubik container in the first matching pod."""
    matching = kube_client.core_v1.list_namespaced_pod(
        namespace="openfuyao-colocation",
        label_selector="name=colocation-rubik",
    ).items
    target = matching[0]
    log_text = kube_client.core_v1.read_namespaced_pod_log(
        name=target.metadata.name,
        namespace=target.metadata.namespace,
        container="colocation-rubik",
    )
    return log_text


@pytest.mark.usefixtures("check_no_volcano")
class TestDeploymentWithoutVolcano:
    """
    @description Verify that pods of each QoS level behave correctly when
    deployed before volcano is installed.
    """

    @staticmethod
    def test_deploy_all_qos(kube_client: KubeClient, hls_pod: V1Pod, ls_pod: V1Pod, be_pod: V1Pod):
        """Test 2, 3, 4"""
        # Same checks for HLS, LS and BE pods, in that order: without volcano
        # installed each pod stays Pending but is still targeted at the
        # volcano scheduler.
        for pod in (hls_pod, ls_pod, be_pod):
            name, ns = pod.metadata.name, pod.metadata.namespace
            # Wait for the pod status to update; do not assert the phase directly.
            assert_that(kube_client.wait_pod_condition(name, ns, "Pending")).is_true()
            assert_that(pod.spec.scheduler_name).is_equal_to('volcano')


@pytest.fixture(scope="class")
def check_volcano(kube_client: KubeClient):
    """Precondition guard: assert that at least one volcano pod exists in the cluster."""
    pod_items = kube_client.core_v1.list_pod_for_all_namespaces().items
    names = [item.metadata.name for item in pod_items]
    found_volcano = any('volcano' in name for name in names)
    assert_that(found_volcano).is_true()


@pytest.mark.usefixtures("check_volcano")
class TestDeploymentWithVolcano:
    """
    @description After the colocation components are fully installed, verify
    the related resources in the cluster, including:
    - Running state and configuration correctness of the sample pods (test-only)
    - Existence and correctness of the custom PriorityClasses
    """

    @staticmethod
    def _assert_rubik_log_matches(kube_client: KubeClient, pattern: str) -> None:
        """Assert that the rubik agent log matches the given regex *pattern*."""
        assert_that(get_rubik_log(kube_client)).matches(pattern)

    @staticmethod
    def test_deploy_hls(kube_client: KubeClient, hls_pod: V1Pod):
        """Test 5, 15, 18, 21, 30"""
        name, ns = hls_pod.metadata.name, hls_pod.metadata.namespace
        # Wait for the pod status to update; do not assert the phase directly.
        assert_that(kube_client.wait_pod_condition(name, ns, "Running")).is_true()
        assert_that(hls_pod.status.qos_class).is_equal_to('Guaranteed')
        assert_that(hls_pod.metadata.annotations['openfuyao.com/qos-level']).is_equal_to('HLS')
        assert_that(hls_pod.spec.scheduler_name).is_equal_to('volcano')
        assert_that(hls_pod.spec.priority).is_equal_to(10000)
        assert_that(hls_pod.spec.priority_class_name).is_equal_to('priority-hls')
        assert_that(hls_pod.spec.affinity.node_affinity).is_not_none()
        # Guaranteed QoS implies cpu limit == cpu request on the container.
        container = hls_pod.spec.containers[0]
        assert_that(container.resources.limits['cpu']).is_equal_to(container.resources.requests['cpu'])

        TestDeploymentWithVolcano._assert_rubik_log_matches(
            kube_client, rf"pod {name}\(.*\) has already been set to online\(0\)")

    @staticmethod
    def test_deploy_ls(kube_client: KubeClient, ls_pod: V1Pod):
        """Test 9, 16, 19, 31"""
        name, ns = ls_pod.metadata.name, ls_pod.metadata.namespace
        # Wait for the pod status to update; do not assert the phase directly.
        assert_that(kube_client.wait_pod_condition(name, ns, "Running")).is_true()
        assert_that(ls_pod.status.qos_class).is_equal_to('Burstable')
        assert_that(ls_pod.metadata.annotations['openfuyao.com/qos-level']).is_equal_to('LS')
        assert_that(ls_pod.spec.scheduler_name).is_equal_to('volcano')
        assert_that(ls_pod.spec.priority).is_equal_to(1000)
        assert_that(ls_pod.spec.priority_class_name).is_equal_to('priority-ls')
        assert_that(ls_pod.spec.affinity.node_affinity).is_not_none()

        TestDeploymentWithVolcano._assert_rubik_log_matches(
            kube_client, rf"pod {name}\(.*\) has already been set to online\(0\)")

    @staticmethod
    def test_deploy_be(kube_client: KubeClient, be_pod: V1Pod):
        """Test 10, 17, 20, 32, 33"""
        name, ns = be_pod.metadata.name, be_pod.metadata.namespace
        # Wait for the pod status to update; do not assert the phase directly.
        assert_that(kube_client.wait_pod_condition(name, ns, "Running")).is_true()
        assert_that(be_pod.status.qos_class).is_equal_to('BestEffort')
        assert_that(be_pod.metadata.annotations['openfuyao.com/qos-level']).is_equal_to('BE')
        assert_that(be_pod.spec.scheduler_name).is_equal_to('volcano')
        assert_that(be_pod.spec.priority).is_equal_to(-1000)
        assert_that(be_pod.spec.priority_class_name).is_equal_to('priority-be')
        assert_that(be_pod.spec.affinity.node_affinity).is_not_none()
        # BE pods must be preemptable and carry the extender-resource config.
        assert_that(be_pod.metadata.annotations["volcano.sh/preemptable"]).is_equal_to('true')
        assert_that(be_pod.metadata.annotations).contains_key('openfuyao.com/extender-resource-cfg')

        TestDeploymentWithVolcano._assert_rubik_log_matches(
            kube_client, rf"pod {name}\(.*\) is set to offline\(-1\) successfully")

    @staticmethod
    def test_should_have_default_priorityclass_resources(kube_client: KubeClient):
        """Test 11, 12, 13, 14"""
        priority_class_list = kube_client.list_priorityclass()["items"]
        priority_class_values = [
            {'name': item['metadata']['name'], 'value': item['value']}
            for item in priority_class_list
        ]
        # The three default PriorityClasses installed by the colocation component.
        expected = [
            {
                'name': 'priority-hls',
                'value': 10000,
            },
            {
                'name': 'priority-ls',
                'value': 1000,
            },
            {
                'name': 'priority-be',
                'value': -1000,
            },
        ]
        for pc in expected:
            assert_that(priority_class_values).contains(pc)

    @staticmethod
    def test_deploy_hls_without_static_policy(kube_client: KubeClient):
        """Test 6"""
        update_kubelet(kube_client.ssh_client, "none", "none")
        pod = kube_client.install_hls_pod()
        assert_that(pod).is_not_none()
        name, ns = pod.metadata.name, pod.metadata.namespace
        try:
            # Wait for the pod to reach Running.
            assert_that(kube_client.wait_pod_condition(name, ns, "Running")).is_true()
        finally:
            kube_client.delete_pod(name, ns)

    @staticmethod
    def test_deploy_hls_with_static_policy_enough_cpu(kube_client: KubeClient):
        """Test 7"""
        update_kubelet(kube_client.ssh_client, "static", "single-numa-node")
        # The hls pod requests 1 CPU, guaranteeing it fits on a single NUMA node.
        pod = kube_client.install_hls_pod("1")
        assert_that(pod).is_not_none()
        name, ns = pod.metadata.name, pod.metadata.namespace
        try:
            # Wait for the pod status to update; do not assert the phase directly.
            assert_that(kube_client.wait_pod_condition(name, ns, "Running")).is_true()
        finally:
            kube_client.delete_pod(name, ns)

    @staticmethod
    def test_deploy_hls_with_static_policy_not_enough_cpu(kube_client: KubeClient, numa_stat: dict):
        """Test 8"""
        update_kubelet(kube_client.ssh_client, "static", "single-numa-node")
        # Set the pod's cpu request and limit to MORE than one NUMA node's
        # CPU count, so the single-numa-node policy cannot place it.
        pod = kube_client.install_hls_pod(str(numa_stat["per_numa"] + 1))
        assert_that(pod).is_not_none()
        name, ns = pod.metadata.name, pod.metadata.namespace
        try:
            assert_that(kube_client.wait_pod_condition(name, ns, "Pending")).is_true()
        finally:
            kube_client.delete_pod(name, ns)

    @staticmethod
    def test_colocation_configmap_optimization(kube_client: KubeClient):
        """Test 27"""
        cm = kube_client.core_v1.read_namespaced_config_map("colocation-config", "openfuyao-colocation")
        assert_that(cm.data.keys()).contains("colocation-options", "rubik-options", "volcano-scheduler-options")
        # BUG FIX: the original key was the garbled literal
        # "overread_namespaced_config_mapsubscription-options" (an editor paste of
        # read_namespaced_config_map into the middle of the key), which made this
        # negative assertion vacuously true. The intended key is below.
        assert_that(cm.data.keys()).does_not_contain("oversubscription-options")
