import json
import os
import sys
import logging
import re

from multiprocessing import Pool, Manager
from datetime import datetime

import boto3
import pytest

from botocore.config import Config
from invoke import run
from invoke.context import Context
from test_utils import eks as eks_utils
from test_utils import sagemaker as sm_utils
from test_utils import metrics as metrics_utils
from test_utils import (
    get_dlc_images,
    is_pr_context,
    is_efa_dedicated,
    is_ec2_image,
    destroy_ssh_keypair,
    setup_sm_benchmark_tf_train_env,
    setup_sm_benchmark_mx_train_env,
    setup_sm_benchmark_hf_infer_env,
    get_framework_and_version_from_tag,
    get_build_context,
    is_nightly_context,
    generate_unique_dlc_name,
)
from test_utils import KEYS_TO_DESTROY_FILE
from test_utils.pytest_cache import PytestCache
from test.vllm.trigger_test import test as test_vllm
from infra.test_infra.entrypoint import main as run_new_tests

from src.codebuild_environment import get_codebuild_project_name

# Module-level logger; mirrors all messages to stdout so CodeBuild captures them.
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
LOGGER.addHandler(logging.StreamHandler(sys.stdout))
# Shared pytest-cache handler backed by S3; the caller-identity account id scopes
# the cache location to the current AWS account.
pytest_cache_util = PytestCache(
    boto3.client("s3"), boto3.client("sts").get_caller_identity()["Account"]
)


def run_sagemaker_local_tests(images, pytest_cache_params):
    """
    Run the SageMaker Local test suite for every image, one worker process per image.

    Raises RuntimeError listing the DLCs whose local tests failed.

    :param images: <list> List of all images to be used in SageMaker tests
    :param pytest_cache_params: <dict> dictionary with data required for pytest cache handler
    """
    if not images:
        return
    # Derive the framework-specific test directory from the first image tag
    framework, _ = get_framework_and_version_from_tag(images[0])
    framework = framework.replace("_trcomp", "").replace("stabilityai_", "")
    if "huggingface" in framework:
        sm_tests_path = os.path.join("test", "sagemaker_tests", "huggingface*")
    else:
        sm_tests_path = os.path.join("test", "sagemaker_tests", framework)
    sm_tests_tar_name = "sagemaker_tests.tar.gz"
    # Package the test sources (minus caches) so workers can ship them to the images
    run(
        f"tar -cz --exclude='*.pytest_cache' --exclude='__pycache__' -f {sm_tests_tar_name} {sm_tests_path}"
    )

    with Pool(len(images)) as worker_pool:
        test_results = worker_pool.starmap(
            sm_utils.execute_local_tests, [[image, pytest_cache_params] for image in images]
        )
    # Collect every image whose worker reported failure and surface them together
    failed_images = [image for image, passed in zip(images, test_results) if not passed]
    if failed_images:
        raise RuntimeError(
            f"SageMaker Local tests failed on the following DLCs:\n"
            f"{json.dumps(failed_images, indent=4)}"
        )


def run_sagemaker_test_in_executor(image, num_of_instances, instance_type):
    """
    Run pytest in a virtual env for a particular image.

    Expected to run under multi-threading.

    :param num_of_instances: <int> number of instances the image test requires
    :param instance_type: type of sagemaker instance the test needs
    :param image: ECR url
    :return: True when the test run completed, False if any step raised
    """
    import log_return

    LOGGER.info("Started running SageMaker test.....")
    pytest_command, path, tag, job_type = sm_utils.generate_sagemaker_pytest_cmd(image, "sagemaker")

    # Mark the requested instances as in-use first; any failure below flips the
    # result flag so the caller can update the pool accordingly.
    succeeded = True
    try:
        log_return.update_pool("running", instance_type, num_of_instances, job_type)
        ctx = Context()
        with ctx.cd(path):
            # Isolate per-image dependencies in a virtualenv named after the tag
            ctx.run(f"python3 -m virtualenv {tag}")
            with ctx.prefix(f"source {tag}/bin/activate"):
                ctx.run("pip install -r requirements.txt", warn=True)
                ctx.run(pytest_command)
    except Exception as e:
        LOGGER.error(e)
        succeeded = False

    return succeeded


def print_log_stream(logs):
    """
    Print the log stream received from a Job Executor.

    :param logs: <dict> the returned dict from JobRequester.receive_logs
    """
    LOGGER.info("Log stream from Job Executor.....")
    stream_text = logs["LOG_STREAM"]
    print(stream_text)
    LOGGER.info("Print log stream complete.")


def run_sagemaker_remote_tests(images, pytest_cache_params):
    """
    Function to set up multiprocessing for SageMaker tests

    In executor mode (EXECUTOR_MODE=true) only the first image is tested
    in-process and the instance pool is updated with the outcome; otherwise one
    worker process per image runs the remote suite in parallel and the shared
    pytest cache is uploaded to S3 afterwards.

    :param images: <list> List of all images to be used in SageMaker tests
    :param pytest_cache_params: <dict> data required by the pytest cache handler
        (forwarded verbatim to the S3 cache upload)
    """
    executor_mode = os.getenv("EXECUTOR_MODE", "False").lower() == "true"

    if executor_mode:
        LOGGER.info("entered executor mode.")
        import log_return

        # NOTE(review): os.getenv returns a string (or None); downstream
        # log_return.update_pool presumably accepts that — confirm if an int is expected.
        num_of_instances = os.getenv("NUM_INSTANCES")
        image = images[0]
        job_type = "training" if "training" in image else "inference"
        instance_type = sm_utils.assign_sagemaker_remote_job_instance_type(image)
        test_succeeded = run_sagemaker_test_in_executor(image, num_of_instances, instance_type)

        # Report name is derived from the image tag (text after the last ':')
        tag = image.split("/")[-1].split(":")[-1]
        test_report = os.path.join(os.getcwd(), "test", f"{tag}.xml")

        # update in-progress pool, send the xml reports
        if test_succeeded:
            log_return.update_pool(
                "completed", instance_type, num_of_instances, job_type, test_report
            )
        else:
            log_return.update_pool(
                "runtimeError", instance_type, num_of_instances, job_type, test_report
            )
        return
    else:
        if not images:
            return
        pool_number = len(images)
        # Using Manager().dict() since it's a thread safe dictionary
        global_pytest_cache = Manager().dict()
        try:
            with Pool(pool_number) as p:
                p.starmap(
                    sm_utils.execute_sagemaker_remote_tests,
                    [
                        [i, images[i], global_pytest_cache, pytest_cache_params]
                        for i in range(pool_number)
                    ],
                )
        finally:
            # Upload whatever cache entries the workers produced, even on failure
            pytest_cache_util.convert_cache_json_and_upload_to_s3(
                global_pytest_cache, **pytest_cache_params
            )


def pull_dlc_images(images):
    """
    Pull every DLC image onto the CodeBuild host before any PyTest command runs.

    :param images: <list> image URIs to docker-pull
    """
    for image_uri in images:
        # hide="out" keeps docker pull progress noise out of the build log
        run(f"docker pull {image_uri}", hide="out")


def setup_sm_benchmark_env(dlc_images, test_path):
    """
    Prepare the resource directories required by the SageMaker benchmark tests.

    :param dlc_images: <str> space-separated list of image URIs under test
    :param test_path: <str> root directory of the benchmark test suite
    """
    # The plan is to have a separate if/elif-condition for each type of image
    if re.search(r"huggingface-(tensorflow|pytorch|mxnet)-inference", dlc_images):
        resources_location = os.path.join(test_path, "huggingface", "inference", "resources")
        setup_sm_benchmark_hf_infer_env(resources_location)
    elif "tensorflow-training" in dlc_images:
        # Bugfix: the previous patterns used "(^ )*" — a group containing a
        # start-of-string anchor plus a space, which can only ever match zero
        # times — instead of the character class "[^ ]*". The intent is to skip
        # any non-space characters between the repository name and the version,
        # so versions not immediately following the colon are also detected.
        tf1_images_in_list = (
            re.search(r"tensorflow-training:[^ ]*1(\.\d+){2}", dlc_images) is not None
        )
        tf2_images_in_list = (
            re.search(r"tensorflow-training:[^ ]*2(\.\d+){2}", dlc_images) is not None
        )
        resources_location = os.path.join(test_path, "tensorflow", "training", "resources")
        setup_sm_benchmark_tf_train_env(resources_location, tf1_images_in_list, tf2_images_in_list)
    elif "mxnet-training" in dlc_images:
        resources_location = os.path.join(test_path, "mxnet", "training", "resources")
        setup_sm_benchmark_mx_train_env(resources_location)


def delete_key_pairs(keys_to_delete_file):
    """
    Delete the EC2 key pairs listed (one per line) in a file, in mainline context.

    :param keys_to_delete_file: file with all of the keys to delete
    """
    # Retry aggressively since many key deletions may run back-to-back
    ec2_client = boto3.client("ec2", config=Config(retries={"max_attempts": 10}))
    with open(keys_to_delete_file) as key_list:
        for line in key_list:
            key_name = line.strip()
            LOGGER.info(f"Destroying {key_name} listed in {keys_to_delete_file}")
            destroy_ssh_keypair(ec2_client, key_name)


def build_bai_docker_container():
    """
    Build the docker container carrying the script requirements (bash 5.0+, conda)
    needed by the BAI benchmark tests.
    """
    # Assumes the current working directory is dlc_tests
    bai_docker_path = os.path.join("benchmark", "bai", "docker")
    context = Context()
    with context.cd(bai_docker_path):
        context.run("docker build -t bai_env_container -f Dockerfile .")


def run_vllm_tests(test_type, all_image_list, new_test_structure_enabled):
    """
    Dispatch vLLM tests to either the new buildspec-based runner or the legacy one.

    Re-raises any failure after logging it.

    :param test_type: <str> suite name (e.g. "eks", "sagemaker"), used in log lines
    :param all_image_list: <list> images under test; the first one is logged
    :param new_test_structure_enabled: <bool> True selects the new test entrypoint
    """
    try:
        LOGGER.info(f"Running vLLM {test_type.upper()} tests with image: {all_image_list[0]}")
        if new_test_structure_enabled:
            LOGGER.info("Using new buildspec-based test system")
            runner = run_new_tests
        else:
            LOGGER.info("Using legacy test system")
            runner = test_vllm
        runner()
    except Exception as e:
        LOGGER.error(f"vLLM {test_type.upper()} tests failed: {str(e)}")
        raise


def main():
    """
    Entry point for the DLC test dispatcher.

    Reads the run configuration from environment variables (TEST_TYPE,
    EXECUTOR_MODE, DLC_IMAGE, SPECIFIED_TESTS, ...), applies per-image-family
    skip rules, then routes to the matching suite: dlc_tests pytest runs
    (sanity/ec2/ecs/eks/canary/...), SageMaker remote or benchmark tests, or
    SageMaker local tests. For pytest-driven suites the process exits with the
    pytest status via sys.exit.
    """
    # Define constants
    start_time = datetime.now()
    test_type = os.getenv("TEST_TYPE")

    efa_dedicated = is_efa_dedicated()
    executor_mode = os.getenv("EXECUTOR_MODE", "False").lower() == "true"
    dlc_images = os.getenv("DLC_IMAGE") if executor_mode else get_dlc_images()
    # Enable IPv6 testing from environment variable; re-export a normalized
    # true/false value so child processes see a consistent setting
    ipv6_enabled = os.getenv("ENABLE_IPV6_TESTING", "false").lower() == "true"
    os.environ["ENABLE_IPV6_TESTING"] = "true" if ipv6_enabled else "false"

    # Enable new test structure path from environment variable (normalized likewise)
    new_test_structure_enabled = os.getenv("USE_NEW_TEST_STRUCTURE", "false").lower() == "true"
    os.environ["USE_NEW_TEST_STRUCTURE"] = "true" if new_test_structure_enabled else "false"

    # Local executions may provide a commit id or omit it; assign a default value for them
    commit_id = os.getenv("CODEBUILD_RESOLVED_SOURCE_VERSION", default="unrecognised_commit_id")
    LOGGER.info(f"Images tested: {dlc_images}")
    all_image_list = dlc_images.split(" ")
    standard_images_list = [image_uri for image_uri in all_image_list if "example" not in image_uri]
    # Do not create EKS cluster for when EIA Only Images are present
    is_all_images_list_eia = all("eia" in image_uri for image_uri in all_image_list)
    eks_cluster_name = None
    benchmark_mode = "benchmark" in test_type
    # e.g. "benchmark-sagemaker" -> "sagemaker"; non-benchmark types pass through
    specific_test_type = (
        re.sub("benchmark-", "", test_type) if "benchmark" in test_type else test_type
    )
    build_context = get_build_context()

    # Skip non-sanity/security test suites for base images in MAINLINE context
    # Skip non-sanity/security/eks test suites for vllm images in MAINLINE context
    if build_context == "MAINLINE":
        if all("base" in image_uri for image_uri in all_image_list) and test_type not in {
            "functionality_sanity",
            "security_sanity",
        }:
            LOGGER.info(
                f"NOTE: {specific_test_type} tests not supported on base images. Skipping..."
            )
            return
        elif all("vllm" in image_uri for image_uri in all_image_list) and test_type not in {
            "functionality_sanity",
            "security_sanity",
            "eks",
            "ec2",
            "sagemaker",
        }:
            LOGGER.info(
                f"NOTE: {specific_test_type} tests not supported on vllm images. Skipping..."
            )
            return
        elif all("sglang" in image_uri for image_uri in all_image_list) and test_type not in {
            "functionality_sanity",
            "security_sanity",
            "sagemaker",
        }:
            LOGGER.info(
                f"NOTE: {specific_test_type} tests not supported on sglang images. Skipping..."
            )
            return
    # quick_checks tests don't have images in it. Using a placeholder here for jobs like that
    # NOTE(review): the bare `except` deliberately swallows tag-parse failures for
    # image-less jobs; consider narrowing it to `except Exception`.
    try:
        framework, version = get_framework_and_version_from_tag(all_image_list[0])
    except:
        framework, version = "general_test", "none"

    # Add pipeline execution as additional differentiator for shared project names on different pipelines
    pipeline_execution = os.getenv("CODEPIPELINE_EXECUTION_ID")

    pytest_cache_params = {
        "codebuild_project_name": get_codebuild_project_name(),
        "commit_id": commit_id if not pipeline_execution else f"{commit_id}_{pipeline_execution}",
        "framework": generate_unique_dlc_name(all_image_list[0]),
        "version": version,
        "build_context": build_context,
        "test_type": test_type,
    }

    # Map the test type to the directory pytest will collect from
    if benchmark_mode:
        test_path = os.path.join("benchmark", specific_test_type)
    elif specific_test_type == "telemetry":
        test_path = "ec2"
    elif "sanity" in specific_test_type:
        test_path = "sanity"
    else:
        test_path = specific_test_type

    # Skipping non HuggingFace/AG specific tests to execute only sagemaker tests
    is_hf_image_present = any("huggingface" in image_uri for image_uri in all_image_list)
    is_ag_image_present = any("autogluon" in image_uri for image_uri in all_image_list)
    is_trcomp_image_present = any("trcomp" in image_uri for image_uri in all_image_list)
    # NOTE(review): after the reassignment below, is_hf_trcomp_image_present can only
    # be True when is_hf_image_present is True AND is_trcomp_image_present is True,
    # which the previous line just excluded — so it is always False; confirm intent.
    is_hf_image_present = is_hf_image_present and not is_trcomp_image_present
    is_hf_trcomp_image_present = is_hf_image_present and is_trcomp_image_present
    if (
        (is_hf_image_present or is_ag_image_present)
        and specific_test_type in ("ecs", "ec2", "eks", "bai")
    ) or (
        is_hf_trcomp_image_present
        and (
            specific_test_type in ("ecs", "eks", "bai", "release_candidate_integration")
            or benchmark_mode
        )
    ):
        # Creating an empty report file because the codebuild job fails without it
        LOGGER.info(
            f"NOTE: {specific_test_type} tests not supported on HF, AG or Trcomp. Skipping..."
        )
        report = os.path.join(os.getcwd(), "test", f"{test_type}.xml")
        sm_utils.generate_empty_report(report, test_type, "huggingface")
        return

    if specific_test_type in (
        "security_sanity",
        "functionality_sanity",
        "telemetry",
        "ecs",
        "ec2",
        "eks",
        "canary",
        "deep-canary",
        "bai",
        "quick_checks",
        "release_candidate_integration",
    ):
        pytest_rerun_arg = "--reruns=1"
        pytest_rerun_delay_arg = "--reruns-delay=10"
        report = os.path.join(os.getcwd(), "test", f"{test_type}.xml")
        # The following two report files will only be used by EKS tests, as eks_train.xml and eks_infer.xml.
        # This is to sequence the tests and prevent one set of tests from waiting too long to be scheduled.
        report_train = os.path.join(os.getcwd(), "test", f"{test_type}_train.xml")
        report_infer = os.path.join(os.getcwd(), "test", f"{test_type}_infer.xml")
        report_multinode_train = os.path.join(os.getcwd(), "test", f"eks_multinode_train.xml")

        # PyTest must be run in this directory to avoid conflicting w/ sagemaker_tests conftests
        os.chdir(os.path.join("test", "dlc_tests"))

        # Pull images for necessary tests
        if "sanity" in specific_test_type:
            pull_dlc_images(all_image_list)
        if specific_test_type == "bai":
            build_bai_docker_container()
        if specific_test_type in ["eks", "ec2"] and not is_all_images_list_eia:
            frameworks_in_images = [
                framework
                for framework in ("mxnet", "pytorch", "tensorflow", "vllm")
                if framework in dlc_images
            ]
            # EKS clusters are per-framework, so a mixed-framework image list is an error
            if len(frameworks_in_images) != 1:
                raise ValueError(
                    f"All images in dlc_images must be of a single framework for EKS tests.\n"
                    f"Instead seeing {frameworks_in_images} frameworks."
                )
            framework = frameworks_in_images[0]

            # vLLM images have their own dedicated test path; hand off and stop here
            if framework == "vllm":
                run_vllm_tests(f"{specific_test_type}", all_image_list, new_test_structure_enabled)
                return

            eks_cluster_name = f"dlc-{framework}-{build_context}"
            eks_utils.eks_setup()
            if eks_utils.is_eks_cluster_active(eks_cluster_name):
                eks_utils.eks_write_kubeconfig(eks_cluster_name)
            else:
                raise Exception(f"EKS cluster {eks_cluster_name} is not in active state")

        # Get specified tests if any
        specified_tests = os.getenv("SPECIFIED_TESTS")
        if specified_tests:
            specified_tests = specified_tests.split()

        # Execute dlc_tests pytest command
        pytest_cmd = [
            "-s",
            "-rA",
            test_path,
            f"--junitxml={report}",
            "-n=auto",
        ]
        # Restrict collection with -k when specific tests were requested
        if specified_tests:
            test_expr = " or ".join(f"test_{t}" for t in specified_tests)
            pytest_cmd.extend(["-k", f"({test_expr})"])

        is_habana_image = any("habana" in image_uri for image_uri in all_image_list)
        if specific_test_type == "ec2":
            if is_habana_image:
                # Habana EC2 tests need the Gaudi test suite staged locally
                context = Context()
                context.run("git clone https://github.com/HabanaAI/gaudi-test-suite.git")
                context.run("tar -c -f gaudi-test-suite.tar.gz gaudi-test-suite")
            else:
                pytest_cmd += [
                    "--dist=worksteal",
                    pytest_rerun_arg,
                    pytest_rerun_delay_arg,
                    "--rerun-except=SerialTestCaseExecutorException",
                ]
        # PR builds get per-test timeouts to keep the pipeline from stalling
        if is_pr_context():
            if specific_test_type == "eks":
                pytest_cmd.append("--timeout=2340")
            else:
                if is_habana_image:
                    pytest_cmd.append("--timeout=18000")
                else:
                    pytest_cmd.append("--timeout=4860")

        pytest_cmds = [pytest_cmd]
        # Execute separate cmd for canaries
        if specific_test_type in ["canary", "deep-canary", "quick_checks"]:
            pytest_cmds = [
                [
                    "-s",
                    "-rA",
                    f"--junitxml={report}",
                    "-n=auto",
                    f"--{specific_test_type}",
                    "--ignore=container_tests/",
                ]
            ]
            canary_pytest_args = []
            if specific_test_type in ["canary"]:
                # Add rerun flag to canaries to avoid flakiness
                canary_pytest_args = [pytest_rerun_arg, pytest_rerun_delay_arg]
            if specific_test_type in ["deep-canary"]:
                # Add rerun flag to canaries to avoid flakiness. Force pytest to collect only
                # deep-canary tests, and prevent wastage of threads in the skipping of
                # non-deep-canary tests.
                canary_pytest_args = [pytest_rerun_arg, pytest_rerun_delay_arg, "-m", "deep_canary"]
            pytest_cmds = [pytest_cmd + canary_pytest_args for pytest_cmd in pytest_cmds]

        # Re-run previously failed tests first; "all" keeps a clean cache from skipping everything
        pytest_cmds = [
            pytest_cmd + ["--last-failed", "--last-failed-no-failures", "all"]
            for pytest_cmd in pytest_cmds
        ]
        pytest_cache_util.download_pytest_cache_from_s3_to_local(os.getcwd(), **pytest_cache_params)
        try:
            # Note:- Running multiple pytest_cmds in a sequence will result in the execution log having two
            #        separate pytest reports, both of which must be examined in case of a manual review of results.
            cmd_exit_statuses = [pytest.main(pytest_cmd) for pytest_cmd in pytest_cmds]
            if all([status == 0 for status in cmd_exit_statuses]):
                sys.exit(0)
            elif any([status != 0 for status in cmd_exit_statuses]) and is_nightly_context():
                # Nightly runs tolerate failures: log them and exit successfully
                LOGGER.warning("\nSuppressed Failed Nightly Tests")
                for index, status in enumerate(cmd_exit_statuses):
                    if status != 0:
                        LOGGER.warning(
                            f'"{pytest_cmds[index]}" tests failed. Status code: {status}'
                        )
                sys.exit(0)
            else:
                raise RuntimeError(pytest_cmds)
        finally:
            # Always push the (possibly updated) pytest cache back to S3
            pytest_cache_util.upload_pytest_cache_from_local_to_s3(
                os.getcwd(), **pytest_cache_params
            )
            # Delete dangling EC2 KeyPairs
            if os.path.exists(KEYS_TO_DESTROY_FILE):
                delete_key_pairs(KEYS_TO_DESTROY_FILE)
    elif specific_test_type == "sagemaker":
        if "vllm" in dlc_images:
            run_vllm_tests("sagemaker", all_image_list, new_test_structure_enabled)
            return

        if "habana" in dlc_images:
            LOGGER.info(f"Skipping SM tests for Habana. Images: {dlc_images}")
            # Creating an empty report file because the codebuild job fails without it
            report = os.path.join(os.getcwd(), "test", f"{test_type}.xml")
            sm_utils.generate_empty_report(report, test_type, "habana")
            return
        if benchmark_mode:
            if "neuron" in dlc_images:
                LOGGER.info(f"Skipping benchmark sm tests for Neuron. Images: {dlc_images}")
                # Creating an empty report file because the codebuild job fails without it
                report = os.path.join(os.getcwd(), "test", f"{test_type}.xml")
                sm_utils.generate_empty_report(report, test_type, "neuron")
                return
            report = os.path.join(os.getcwd(), "test", f"{test_type}.xml")
            os.chdir(os.path.join("test", "dlc_tests"))

            setup_sm_benchmark_env(dlc_images, test_path)
            pytest_cmd = [
                "-s",
                "-rA",
                test_path,
                f"--junitxml={report}",
                "-n=auto",
                "-o",
                "norecursedirs=resources",
            ]

            # EFA tests only run on dedicated EFA jobs; everyone else excludes them
            pytest_cmd += ["--efa"] if efa_dedicated else ["-m", "not efa"]

            status = pytest.main(pytest_cmd)
            if is_nightly_context() and status != 0:
                LOGGER.warning("\nSuppressed Failed Nightly Tests")
                LOGGER.warning(f'"{pytest_cmd}" tests failed. Status code: {status}')
                sys.exit(0)
            else:
                sys.exit(status)

        else:
            # Exclude combinations that remote SM tests don't support
            sm_remote_images = [
                image
                for image in standard_images_list
                if not (("tensorflow-inference" in image and "py2" in image) or is_ec2_image(image))
            ]
            run_sagemaker_remote_tests(sm_remote_images, pytest_cache_params)
            if standard_images_list and not sm_remote_images:
                report = os.path.join(os.getcwd(), "test", f"{test_type}.xml")
                sm_utils.generate_empty_report(report, test_type, "sm_remote_unsupported")
        metrics_utils.send_test_duration_metrics(start_time)

    elif specific_test_type == "sagemaker-local":
        # Image substrings that have no local-mode tests, mapped to the skip reason
        sm_local_to_skip = {
            "habana": "Skipping SM tests because SM does not yet support Habana",
            "neuron": "Skipping - there are no local mode tests for Neuron",
            "huggingface-tensorflow-training": "Skipping - there are no local mode tests for HF TF training",
            "vllm": "Skipping - there are no local mode tests for VLLM",
            "sglang": "Skipping - there are no local mode tests for sglang",
        }

        for skip_condition, reason in sm_local_to_skip.items():
            if skip_condition in dlc_images:
                LOGGER.info(f"{reason}. Images: {dlc_images}")
                # Creating an empty report file because the codebuild job fails without it
                report = os.path.join(os.getcwd(), "test", f"{test_type}.xml")
                sm_utils.generate_empty_report(report, test_type, skip_condition)
                return

        testing_image_list = [
            image
            for image in standard_images_list
            if not (
                ("tensorflow-inference" in image and "py2" in image)
                or ("eia" in image)
                or (is_ec2_image(image))
            )
        ]
        run_sagemaker_local_tests(testing_image_list, pytest_cache_params)
        # for EIA Images
        if len(testing_image_list) == 0:
            report = os.path.join(os.getcwd(), "test", f"{test_type}.xml")
            sm_utils.generate_empty_report(report, test_type, "eia")
    else:
        raise NotImplementedError(
            f"{test_type} test is not supported. Only support ec2, ecs, eks, sagemaker, telemetry, security_sanity, and functionality_sanity currently"
        )


# Script entry point: dispatch tests when executed directly (not on import).
if __name__ == "__main__":
    main()
