#######################################################################
# Copyright (c) 2025 Huawei Technologies Co., Ltd.
# Openfuyao is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#     http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
#######################################################################

import argparse
from dataclasses import dataclass
import logging
import subprocess
import time

from kubernetes import config, client
import yaml

from util import format_requests_num, check_resource_num

# Directory the benchmark output is redirected into via the shell `>>`
# appended to the container command — i.e. a path inside the client pod's
# filesystem (presumably baked into the client image; confirm against the
# Job template).
LOG_DIR = "/usr/local/redis-test"

# Module-level Kubernetes API clients.  NOTE: load_kube_config() runs at
# import time, so importing this module requires a reachable, valid
# kubeconfig on the host.
config.load_kube_config()
core_v1 = client.CoreV1Api()
batch_v1 = client.BatchV1Api()

# Plain "LEVEL: message" log lines on stderr, INFO and above.
logging.basicConfig(
    level=logging.INFO,
    format="%(levelname)s: %(message)s",
)


def read_client_obj(template_job: str) -> dict:
    """Load and parse a Kubernetes Job manifest from a YAML file.

    Args:
        template_job: Path to the Job template YAML file.

    Returns:
        The parsed manifest (a dict for a well-formed Job manifest).
    """
    # Explicit encoding so a manifest containing non-ASCII text parses the
    # same regardless of the host locale (the original relied on the
    # platform default encoding).
    with open(template_job, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)


def update_job(
    body: dict,
    name: str,
    redis_cmd_args: list[str],
    cpu_requests: str = "",
    cpu_limits: str = "",
) -> dict:
    """Customize a Job manifest in place: name, container args, CPU resources.

    Args:
        body: Parsed Job manifest; mutated in place and also returned.
        name: Job name written into ``metadata.name``.
        redis_cmd_args: ``args`` for the first (and only) container.
        cpu_requests: Optional CPU request quantity (e.g. "500m").
        cpu_limits: Optional CPU limit quantity.

    Returns:
        The mutated manifest dict.

    Raises:
        ValueError: If a supplied CPU quantity fails check_resource_num().
    """
    if cpu_requests and not check_resource_num(cpu_requests):
        raise ValueError(f"cpu request is not valid: {cpu_requests}")
    if cpu_limits and not check_resource_num(cpu_limits):
        raise ValueError(f"cpu limits is not valid: {cpu_limits}")

    body["metadata"]["name"] = name
    container = body["spec"]["template"]["spec"]["containers"][0]
    container["args"] = redis_cmd_args

    # The template ships with empty resources by default, so seed sensible
    # defaults before writing CPU values.
    # BUG FIX: the original checked/created the key "resource" (singular)
    # while the writes below target "resources", so the seeded dict was
    # never used and an empty template raised KeyError.
    if cpu_requests or cpu_limits:
        if not container.get("resources"):
            container["resources"] = {
                "requests": {"memory": "8Gi"},
                "limits": {"memory": "8Gi"},
            }
        # Guard against a template that pre-defines only one of the two
        # sub-mappings.
        container["resources"].setdefault("requests", {})
        container["resources"].setdefault("limits", {})
    if cpu_requests:
        container["resources"]["requests"]["cpu"] = cpu_requests
    if cpu_limits:
        container["resources"]["limits"]["cpu"] = cpu_limits

    return body


def install_job(job: dict):
    """Create the Job in the namespace declared by its own manifest."""
    namespace = job["metadata"]["namespace"]
    batch_v1.create_namespaced_job(namespace, job)


def wait_for_job_complete(name: str, namespace: str, wait_timeout: int = 60):
    """Block until the Job reaches condition=complete.

    Shells out to ``kubectl wait``. Returns True on completion, False when
    kubectl exits non-zero (e.g. the wait timed out).
    """
    wait_cmd = [
        "kubectl",
        "wait",
        "--for=condition=complete",
        f"job/{name}",
        f"--namespace={namespace}",
        f"--timeout={wait_timeout}s",
    ]
    # Keep the try body minimal: only the call that can raise.
    try:
        subprocess.run(wait_cmd, check=True)
    except subprocess.CalledProcessError:
        return False
    return True


def cleanup_client(job_name: str, namespace: str):
    """Delete the benchmark Job and every pod it spawned."""
    batch_v1.delete_namespaced_job(name=job_name, namespace=namespace)
    pod_selector = f"job-name={job_name}"
    core_v1.delete_collection_namespaced_pod(
        namespace, label_selector=pod_selector
    )
    logging.info(f"已清除 Job {job_name}")


@dataclass
class RedisClientParams:
    """CLI-derived settings for one redis-benchmark session.

    Field names match the argparse dest names one-to-one (see __main__).
    """

    run_time: int          # number of benchmark iterations to run
    test_type: str         # free-form label embedded in the log file name
    base_yaml: str         # path to the Job template YAML
    cpu_requests: str      # CPU request for the client pod ("" = unset)
    cpu_limits: str        # CPU limit for the client pod ("" = unset)
    redis_host: str        # redis-benchmark -h
    redis_port: str        # redis-benchmark -p (kept as str for arg join)
    redis_request_num: int # redis-benchmark -n
    redis_client_num: int  # redis-benchmark -c
    redis_op_type: str     # redis-benchmark -t (e.g. "get", "set")
    redis_threads: int     # redis-benchmark --threads


def _log_failure_diagnostics(job_name: str, job_ns: str):
    """Best-effort dump of Job status and per-pod logs after a failed run."""
    status = batch_v1.read_namespaced_job_status(job_name, job_ns)
    if isinstance(status, client.V1Job):
        logging.info("Job 状态：")
        logging.info(status.status)
    pods = core_v1.list_namespaced_pod(
        job_ns, label_selector=f"job-name={job_name}"
    )
    for pod_idx, pod in enumerate(pods.items):
        pod_name = pod.metadata.name
        try:
            log = core_v1.read_namespaced_pod_log(pod_name, job_ns)
        except Exception:
            # BUG FIX: the original `return`ed here, aborting all remaining
            # runs WITHOUT cleaning up the Job (resource leak).  Diagnostics
            # are best-effort, so skip this pod and keep going.
            logging.error(f"无法获取Pod {pod_name} 日志")
            continue
        # BUG FIX: message had a stray "f" before {pod_name}.
        logging.info(f"----第{pod_idx + 1}次Pod {pod_name}日志----")
        logging.info(log)
        logging.info("----")


def run_redis_client_benchmark(params: RedisClientParams):
    """Run redis-benchmark as a Kubernetes Job, params.run_time times.

    Each iteration installs a fresh Job from the template, waits for it to
    complete, dumps diagnostics on failure, and deletes the Job and its pods.

    Args:
        params: All benchmark settings (see RedisClientParams).
    """
    for i in range(1, params.run_time + 1):
        job_name = f"redis-client-test-{i}"
        log_file = f"log-{params.test_type}-{format_requests_num(params.redis_request_num)}-{i}.log"
        redis_benchmark_args = [
            "redis-benchmark",
            "-h", params.redis_host,
            "-p", params.redis_port,
            "-c", str(params.redis_client_num),
            "-n", str(params.redis_request_num),
            "-t", params.redis_op_type,
            "--threads", str(params.redis_threads),
        ]
        # Pass the command plus log redirection to the container as a single
        # shell string, so output lands in LOG_DIR inside the pod.
        full_command = " ".join(redis_benchmark_args) + f" >> {LOG_DIR}/{log_file}"

        job = read_client_obj(params.base_yaml)
        job_ns = job["metadata"]["namespace"]
        logging.info(f"安装 Job {job_name}，命名空间 {job_ns}")
        job = update_job(
            body=job,
            name=job_name,
            redis_cmd_args=[full_command],
            cpu_requests=params.cpu_requests,
            cpu_limits=params.cpu_limits,
        )

        logging.info(f"====开始第{i}次测试 类型: {params.test_type}====")
        # BUG FIX: message had an unbalanced trailing ")".
        logging.info(f"CPU request/limit: {params.cpu_requests}/{params.cpu_limits}")
        try:
            install_job(job)
        # BUG FIX: the original caught bare Exception but read e.body, which
        # only exists on ApiException (AttributeError on anything else), and
        # its message used a shell-style "${job_name}" inside an f-string.
        except client.ApiException as e:
            logging.error(f"Job {job_name} 安装失败: {job}")
            logging.error(f"{e.body}")
            return
        except Exception as e:
            logging.error(f"Job {job_name} 安装失败: {job}")
            logging.error(f"{e}")
            return

        if wait_for_job_complete(job_name, job_ns):
            # BUG FIX: message used "${job_name}" (logged a literal "$").
            logging.info(f"Job {job_name} 运行完成，结果保存至 {log_file}")
        else:
            logging.error(f"Job {job_name} 运行失败")
            _log_failure_diagnostics(job_name, job_ns)
        # Clean up on success AND failure (the failure path previously
        # leaked the Job when pod logs could not be fetched).
        cleanup_client(job_name, job_ns)

        time.sleep(5)

    logging.info("====所有测试已完成====")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Redis客户端基准测试脚本")

    # (flags, add_argument keyword options) — one entry per CLI parameter.
    # Dest names derived from the long flags match RedisClientParams fields
    # one-to-one.
    cli_options = [
        (("-n", "--run-time"),
         dict(type=int, default=1, help="基准测试运行的次数")),
        (("-t", "--test-type"),
         dict(type=str, default="close", help="测试类型(例如: close)")),
        (("--base-yaml",),
         dict(type=str, default="redis/test-redis-client.yaml",
              help="Redis客户端job的基础YAML文件")),
        (("--cpu-requests",),
         dict(type=str, default="", help="客户端pod的CPU请求值")),
        (("--cpu-limits",),
         dict(type=str, default="", help="客户端pod的CPU限制值")),
        (("--redis-host",),
         dict(type=str, default="redis-server-service", help="Redis服务器主机地址")),
        (("--redis-port",),
         dict(type=str, default="6379", help="Redis服务器端口号")),
        (("-r", "--redis-request-num"),
         dict(type=int, default=1000000, help="redis-benchmark的请求数量")),
        (("-c", "--redis-client-num"),
         dict(type=int, default=50, help="redis-benchmark的客户端数量")),
        (("-o", "--redis-op-type"),
         dict(type=str, default="get", help="Redis操作类型(例如: get, set)")),
        (("--redis-threads",),
         dict(type=int, default=1, help="redis-benchmark的线程数量")),
    ]
    for flags, options in cli_options:
        parser.add_argument(*flags, **options)

    args = parser.parse_args()
    # argparse dest names line up exactly with the dataclass fields.
    run_redis_client_benchmark(RedisClientParams(**vars(args)))
