from collections.abc import Generator

from assertpy import assert_that
from playwright.sync_api import Page, BrowserContext
import yaml
import pytest

from utils.client import KubeClient
import actions.colocation.colocation_configuration as cc


@pytest.fixture(scope="class", autouse=True)
def config_preemption(kube_client: KubeClient):
    """Enable Volcano preemption for the duration of the test class.

    Reads the volcano-scheduler ConfigMap, verifies that the ``priority``
    plugin is configured (preemption ordering depends on it), replaces the
    ``enqueue`` action with ``preempt`` and writes the ConfigMap back.
    On teardown the original scheduler configuration is restored so the
    cluster-wide change does not leak into later test classes.
    """
    configmap = kube_client.core_v1.read_namespaced_config_map(
        name="volcano-scheduler-configmap",
        namespace="volcano-system"
    )
    # Keep the pristine config text so it can be restored on teardown.
    original_conf = configmap.data["volcano-scheduler.conf"]
    config = yaml.safe_load(original_conf)
    # The priority plugin must already be present in some tier.
    has_priority = any(
        any(plugin.get('name') == 'priority' for plugin in tier.get('plugins', []))
        for tier in config.get('tiers', [])
    )
    assert_that(has_priority).is_true()
    # Rewrite the actions list: drop "enqueue", make sure "preempt" is present.
    actions = config.get("actions", "")
    actions_list = [a.strip() for a in actions.split(",") if a.strip()]
    if "enqueue" in actions_list:
        actions_list.remove("enqueue")
    if "preempt" not in actions_list:
        actions_list.append("preempt")
    config["actions"] = ", ".join(actions_list)
    # Write the modified configuration back to the ConfigMap.
    configmap.data["volcano-scheduler.conf"] = yaml.safe_dump(config, sort_keys=False)
    kube_client.core_v1.replace_namespaced_config_map(
        name="volcano-scheduler-configmap",
        namespace="volcano-system",
        body=configmap
    )

    yield

    # Teardown: re-read the ConfigMap (to pick up the current resourceVersion)
    # and restore the original scheduler configuration.
    configmap = kube_client.core_v1.read_namespaced_config_map(
        name="volcano-scheduler-configmap",
        namespace="volcano-system"
    )
    configmap.data["volcano-scheduler.conf"] = original_conf
    kube_client.core_v1.replace_namespaced_config_map(
        name="volcano-scheduler-configmap",
        namespace="volcano-system",
        body=configmap
    )


@pytest.fixture(scope="function", autouse=True)
def conf_modal_page(context: BrowserContext) -> Generator[Page, None, None]:
    """Open load-aware scheduling (usage plugin) on the colocation policy page.

    If the switch is already on, the modal is cancelled and the pre-existing
    state is left untouched; the plugin is switched back off on teardown only
    when this fixture was the one that enabled it.

    Yields:
        The page opened on the colocation configuration view.
    """
    page = context.new_page()
    cc.visit_colocation_config_page(page)
    cc.do_open_modal(page)

    # Remember whether we changed anything, so teardown can be symmetric.
    enabled_by_fixture = not cc.get_usage_plugin_switch(page).is_checked()
    if enabled_by_fixture:
        cc.do_toggle_usage_plugin_switch(page)
        cc.do_click_modal_confirm(page)
    else:
        cc.do_click_modal_cancel(page)
    yield page

    # Only undo what this fixture changed: toggling unconditionally would
    # disable a plugin that was already on before the test started.
    if enabled_by_fixture:
        cc.do_open_modal(page)
        cc.do_toggle_usage_plugin_switch(page)
        cc.do_click_modal_confirm(page)
    page.close()


class TestPodPreemption:
    """
    @description Verify the scheduling behavior under resource contention when
    Pods of multiple QoS levels coexist in the cluster.
    Adjust the request/limit of the Pods below to the actual cluster situation
    (cpu/topologyManagerPolicy, NUMA and CPU counts, workloads already running
    in the test cluster, ...).
    Make sure the first pod is Running, the second is Pending due to
    insufficient resources, and the third requests the same resources as the
    second but ends up Running because of its priority configuration.
    """

    @staticmethod
    def _cleanup_pods(kube_client: KubeClient, pods) -> None:
        """Best-effort deletion of the given ``(name, namespace)`` pairs.

        A failed deletion (e.g. the pod was already preempted away, or was
        never created) must not abort cleanup of the remaining pods, so any
        exception from ``delete_pod`` is swallowed after being logged.
        """
        for name, ns in pods:
            if name and ns:
                try:
                    kube_client.delete_pod(name, ns)
                except Exception:
                    # Pod may already be gone; keep cleaning up the rest.
                    pass
                finally:
                    kube_client.logger.info(f"Pod {name} 已删除或未创建")

    @staticmethod
    def test_hls_preempt_ls(kube_client: KubeClient):
        """Test 23: an HLS pod preempts a lower-priority LS pod."""
        ls_name_1 = ls_name_2 = hls_name = None
        ls_ns_1 = ls_ns_2 = hls_ns = None
        try:
            ls_pod_1 = kube_client.install_ls_pod(name="ls-pod-1")
            ls_name_1, ls_ns_1 = ls_pod_1.metadata.name, ls_pod_1.metadata.namespace
            assert_that(kube_client.wait_pod_condition(ls_name_1, ls_ns_1, "Running")).is_true()

            ls_pod_2 = kube_client.install_ls_pod(name="ls-pod-2")
            ls_name_2, ls_ns_2 = ls_pod_2.metadata.name, ls_pod_2.metadata.namespace
            assert_that(kube_client.wait_pod_condition(ls_name_2, ls_ns_2, "Pending")).is_true()

            hls_pod = kube_client.install_hls_pod()
            hls_name, hls_ns = hls_pod.metadata.name, hls_pod.metadata.namespace
            assert_that(kube_client.wait_pod_condition(hls_name, hls_ns, "Running")).is_true()
            # At least one ls pod must have been preempted and deleted, so the
            # two existence checks must not both be true.
            ls_1_exists = kube_client.is_pod_existing(ls_name_1, ls_ns_1)
            ls_2_exists = kube_client.is_pod_existing(ls_name_2, ls_ns_2)
            assert_that(ls_1_exists and ls_2_exists).is_false()
        finally:
            TestPodPreemption._cleanup_pods(kube_client, [
                (ls_name_1, ls_ns_1),
                (ls_name_2, ls_ns_2),
                (hls_name, hls_ns),
            ])

    @staticmethod
    def test_ls_preempt_be(kube_client: KubeClient):
        """Test 24: an LS pod preempts a lower-priority BE pod."""
        be_name_1 = be_name_2 = ls_name = None
        be_ns_1 = be_ns_2 = ls_ns = None
        try:
            be_pod_1 = kube_client.install_be_pod(name="be-pod-1")
            be_name_1, be_ns_1 = be_pod_1.metadata.name, be_pod_1.metadata.namespace
            assert_that(kube_client.wait_pod_condition(be_name_1, be_ns_1, "Running")).is_true()

            be_pod_2 = kube_client.install_be_pod(name="be-pod-2")
            be_name_2, be_ns_2 = be_pod_2.metadata.name, be_pod_2.metadata.namespace
            assert_that(kube_client.wait_pod_condition(be_name_2, be_ns_2, "Pending")).is_true()

            ls_pod = kube_client.install_ls_pod()
            ls_name, ls_ns = ls_pod.metadata.name, ls_pod.metadata.namespace
            assert_that(kube_client.wait_pod_condition(ls_name, ls_ns, "Running")).is_true()
            # At least one be pod must have been preempted and deleted, so the
            # two existence checks must not both be true.
            be_1_exists = kube_client.is_pod_existing(be_name_1, be_ns_1)
            be_2_exists = kube_client.is_pod_existing(be_name_2, be_ns_2)
            assert_that(be_1_exists and be_2_exists).is_false()
        finally:
            TestPodPreemption._cleanup_pods(kube_client, [
                (be_name_1, be_ns_1),
                (be_name_2, be_ns_2),
                (ls_name, ls_ns),
            ])