import os
import shutil
import glob
import pathlib
from typing import Dict, List, Optional
import yaml
import lemonade.common.printing as printing
import lemonade.common.build as build
import lemonade.common.exceptions as exp

# Hidden marker file placed in a directory to tag it as a Lemonade cache
CACHE_MARKER = ".lemonadecache"
# Hidden marker file placed in a directory to tag it as a Lemonade build
BUILD_MARKER = ".lemonadebuild"


def rmdir(folder, excludes: Optional[List[str]] = None):
    """
    Remove the contents of a directory from the filesystem.

    Any entry whose full path appears in `excludes` is kept. When
    `excludes` is provided (even as an empty list), the directory itself
    is also kept; when it is None, the whole directory is removed.

    Returns True if `folder` was an existing directory, False otherwise.
    """

    # Treat a missing excludes list as "exclude nothing"
    keep = excludes if excludes else []

    if not os.path.isdir(folder):
        return False

    for entry in os.listdir(folder):
        entry_path = os.path.join(folder, entry)
        if entry_path in keep:
            continue
        if os.path.isfile(entry_path) or os.path.islink(entry_path):
            os.unlink(entry_path)
        elif os.path.isdir(entry_path):
            shutil.rmtree(entry_path)

    # Only remove the (now mostly empty) directory itself when the caller
    # did not ask to preserve anything
    if excludes is None:
        shutil.rmtree(folder)

    return True


def get_all(path, exclude_path=False, file_type=build.state_file_name, recursive=True):
    """
    Return the paths of all files under `path` whose filename contains
    `file_type`.

    Args:
        path: directory to search.
        exclude_path: when True, return just the basenames instead of
            full paths.
        file_type: substring a filename must contain to be included.
        recursive: when True, search all subdirectories; otherwise only
            the top level of `path`.
    """
    if recursive:
        files = [
            os.path.join(dp, f)
            for dp, _, filenames in os.walk(path)
            for f in filenames
            if file_type in f
        ]
    else:
        # os.walk() yields (dirpath, dirnames, filenames) tuples; take only
        # the first tuple for a non-recursive listing. The previous code
        # unpacked the generator object itself, which did not produce the
        # top-level directory listing. The default covers a missing `path`.
        dp, _, filenames = next(os.walk(path), (path, [], []))
        files = [os.path.join(dp, f) for f in filenames if file_type in f]

    if exclude_path:
        files = [os.path.basename(f) for f in files]

    return files


def clean_file_name(script_path: str) -> str:
    """
    Strip the file extension (".py", ".onnx", etc.) from `script_path`.

    For a "<name>_state.yaml" file, also strip the trailing "_state"
    so only "<name>" remains.
    """

    stem = pathlib.Path(script_path).stem
    state_suffix = "_" + build.state_file_name
    if script_path.endswith(state_suffix):
        # The stem still ends in "_state" after ".yaml" is dropped; remove it
        state_stem = "_" + os.path.splitext(build.state_file_name)[0]
        return stem.replace(state_stem, "")
    return stem


class CacheError(exp.Error):
    """
    Raise this exception when the cache is being accessed incorrectly,
    e.g., when a path is not a Lemonade-generated cache or build directory.
    """


def _load_yaml(file) -> Dict:
    """
    Load a YAML file into a dictionary.

    Returns an empty dict when the file does not exist or contains an
    empty document (yaml.load returns None in that case, which would
    otherwise break callers that expect a Dict).
    """
    if os.path.isfile(file):
        with open(file, "r", encoding="utf8") as stream:
            loaded = yaml.load(stream, Loader=yaml.FullLoader)
            return loaded if loaded is not None else {}
    else:
        return {}


def save_yaml(dict: Dict, file):
    """
    Serialize `dict` to `file` as a YAML document (UTF-8).
    """
    with open(file, "w", encoding="utf8") as outfile:
        outfile.write(yaml.dump(dict))


def print_yaml_file(file_path, description):
    """
    Print the contents of the YAML file at `file_path` to stdout, preceded
    by an informational log line.

    Raises CacheError when the file does not exist.
    """
    if not os.path.exists(file_path):
        raise CacheError(
            f"No {description} found at {file_path}. "
            "Try running `lemonade cache --list` to see the builds in your build cache."
        )

    with open(file_path, "r", encoding="utf-8") as file:
        printing.log_info(f"The {description} for {file_path} are:")
        print(file.read())


def make_cache_dir(cache_dir: str):
    """
    Create the cache directory, if needed, and place a hidden marker file
    in it so other commands can recognize it as a Lemonade cache directory.
    """

    os.makedirs(cache_dir, exist_ok=True)

    # File that indicates that the directory is a cache directory.
    # Use the canonical "utf-8" codec name (the previous "utf" worked only
    # via a codec alias).
    cache_file_path = os.path.join(cache_dir, CACHE_MARKER)
    with open(cache_file_path, mode="w", encoding="utf-8"):
        pass


def make_build_dir(cache_dir: str, build_name: str):
    """
    Create the build directory (and the enclosing cache directory, if
    needed) and place a hidden marker file in it so other commands can
    recognize it as a Lemonade build directory.
    """
    make_cache_dir(cache_dir)

    build_dir = build.output_dir(cache_dir, build_name)
    os.makedirs(build_dir, exist_ok=True)

    # File that indicates that the directory is a build directory.
    # Use the canonical "utf-8" codec name (the previous "utf" worked only
    # via a codec alias).
    build_file_path = os.path.join(build_dir, BUILD_MARKER)
    with open(build_file_path, mode="w", encoding="utf-8"):
        pass


def check_cache_dir(cache_dir: str):
    """
    Verify that `cache_dir` contains the hidden marker file identifying it
    as a Lemonade-generated cache directory.

    Raises CacheError when the marker file is missing.
    """
    marker = os.path.join(cache_dir, CACHE_MARKER)
    if os.path.isfile(marker):
        return

    raise CacheError(
        f"{cache_dir} is not a cache directory generated by Lemonade. "
        "You can only clean, delete and generate reports for directories that "
        "have been generated by Lemonade. Set a different --cache-dir before "
        "trying again."
    )


def is_build_dir(cache_dir: str, build_name: str):
    """
    Return True when the named build's output directory contains the hidden
    marker file placed there by make_build_dir().
    """
    marker = os.path.join(build.output_dir(cache_dir, build_name), BUILD_MARKER)
    return os.path.isfile(marker)


def clean_output_dir(cache_dir: str, build_name: str) -> None:
    """
    Delete all elements of the output directory that are not human readable.

    Files whose extension is not in the allowed set are removed, then any
    directories left empty are removed as well.

    Raises CacheError when the build does not exist.
    """
    output_dir = build.output_dir(cache_dir, build_name)
    if os.path.isdir(output_dir) and is_build_dir(cache_dir, build_name):
        output_dir = os.path.expanduser(output_dir)
    else:
        raise CacheError(f"No build found at {output_dir}")

    # Remove files that do not have an allowed (human-readable) extension
    allowed_extensions = (".txt", ".out", ".yaml", ".json", ".png")
    all_paths = glob.glob(f"{output_dir}/**/*", recursive=True)
    for path in all_paths:
        if os.path.isfile(path) and not path.endswith(allowed_extensions):
            os.remove(path)

    # Remove all empty folders. Visit children before their parents
    # (reverse-lexicographic order puts a subpath before its parent) so
    # that directories that become empty once their empty subfolders are
    # deleted get removed too.
    for path in sorted((p for p in all_paths if os.path.isdir(p)), reverse=True):
        if os.path.isdir(path) and not os.listdir(path):
            os.rmdir(path)


def get_available_builds(cache_dir):
    """
    Get all of the build directories within the build cache
    located at `cache_dir`, sorted by name.
    """

    # Fail fast if cache_dir is not a Lemonade cache
    check_cache_dir(cache_dir)

    builds_root = os.path.abspath(build.builds_dir(cache_dir))
    builds = []
    for build_name in os.listdir(builds_root):
        is_valid = os.path.isdir(
            build.output_dir(cache_dir, build_name)
        ) and is_build_dir(cache_dir, build_name)
        if is_valid:
            builds.append(pathlib.PurePath(build_name).name)

    return sorted(builds)


class Keys:
    """
    Canonical names for the statistics recorded in each build's
    lemonade_stats.yaml file and surfaced in generated reports.
    """

    # Number of parameters in the model
    PARAMETERS = "parameters"
    # List of all build tools in the Sequence
    SELECTED_SEQUENCE_OF_TOOLS = "selected_sequence_of_tools"
    # MeasuredPerformance data for a benchmarked workload
    PERFORMANCE = "performance"
    # Runtime used for the benchmark
    RUNTIME = "runtime"
    # Type of device used for the benchmark (e.g., "x86")
    DEVICE_TYPE = "device_type"
    # Specific device used for the benchmark
    DEVICE = "device"
    # Name of the model
    MODEL_NAME = "model_name"
    # Number of iterations used in benchmarking
    ITERATIONS = "iterations"
    # System information to keep track of DUT
    SYSTEM_INFO = "system_info"
    # Indicates status of the most recent build tool run: FunctionStatus
    BUILD_STATUS = "build_status"
    # Prefix for reporting the execution duration of a tool
    # In the report this will look like tool_duration:TOOL_NAME
    TOOL_DURATION = "tool_duration"
    # Prefix for reporting the peak working memory in the build through this tool
    # In the report this will look like tool_memory:TOOL_NAME
    TOOL_MEMORY = "tool_memory"
    # Prefix for reporting the execution status of a tool
    # In the report this will look like tool_status:TOOL_NAME
    TOOL_STATUS = "tool_status"
    # Records the date and time of the evaluation after analysis but before
    # build and benchmark
    TIMESTAMP = "timestamp"
    # Records the logfile of any failed tool/benchmark
    ERROR_LOG = "error_log"
    # Name of the build in the cache
    BUILD_NAME = "build_name"
    # Sequence of tools used for this build, along with their args
    SEQUENCE_INFO = "sequence_info"
    # Version of Lemonade used for the build
    LEMONADE_VERSION = "lemonade_version"
    # Unique ID for this build
    UID = "uid"
    # Directory where the lemonade build cache is stored
    CACHE_DIR = "cache_dir"
    # Example inputs to the model
    INPUTS = "inputs"
    # Path to the file containing the memory usage plot
    MEMORY_USAGE_PLOT = "memory_usage_plot"
    # Average of all tested MMLU subject scores
    AVERAGE_MMLU_ACCURACY = "average_mmlu_accuracy"


def _clean_logfile(logfile_lines: List[str]) -> List[str]:
    """
    Remove the whitespace and empty lines from an array of logfile lines
    """
    return "\n".join([line.rstrip() for line in logfile_lines if line.rstrip()])


def stats_file(cache_dir: str, build_name: str):
    """
    Returns the expected location of the lemonade stats file
    for the given build.
    """
    build_dir = build.output_dir(cache_dir, build_name)
    return os.path.join(build_dir, "lemonade_stats.yaml")


class Stats:
    """
    Reader/writer for a build's lemonade_stats.yaml file.

    Stats are re-read from disk on every access and written back in full
    on every save, so multiple Stats instances pointing at the same build
    observe each other's writes.
    """

    def __init__(self, cache_dir: str, build_name: str):
        self.file = stats_file(cache_dir, build_name)

        os.makedirs(os.path.dirname(self.file), exist_ok=True)
        if not os.path.exists(self.file):
            # Start an empty stats file
            save_yaml({}, self.file)

    @property
    def stats(self) -> Dict:
        # Always reload from disk so concurrent writers are observed
        return _load_yaml(self.file)

    def _set_key(self, target: Dict, keys: List[str], value):
        """
        Recursive approach to safely setting a key within any level of hierarchy
        in a dictionary. If a parent key of the desired key does not exist, create
        it and set it with an empty dictionary before proceeding.

        The end result is: target[keys[0]][keys[1]]...[keys[-1]] = value
        """
        if len(keys) == 1:
            target[keys[0]] = value

        else:
            if keys[0] not in target.keys():
                target[keys[0]] = {}

            self._set_key(target[keys[0]], keys[1:], value)

    def save_stat(self, key: str, value):
        """
        Save a top-level statistic to the yaml file in the build directory
        """

        stats_dict = self.stats

        self._set_key(stats_dict, [key], value)

        save_yaml(stats_dict, self.file)

    def save_sub_stat(self, parent_key: str, key: str, value):
        """
        Save a nested statistic (stats[parent_key][key] = value) to the
        yaml file in the build directory
        """
        stats_dict = self.stats

        self._set_key(stats_dict, [parent_key, key], value)

        save_yaml(stats_dict, self.file)

    def save_eval_error_log(self, logfile_path):
        """
        Record a (possibly truncated) copy of the log at `logfile_path`
        under Keys.ERROR_LOG.
        """
        if logfile_path is None:
            # Avoid an error in the situation where we crashed before
            # initializing the tool (in which case it has no logfile path yet)
            return
        if os.path.exists(logfile_path):
            with open(logfile_path, "r", encoding="utf-8") as f:
                full_log = f.readlines()

                # Log files can be quite large, so we will just record the beginning
                # and ending lines. Users can always open the log file if they
                # want to see the full log.
                start_cutoff = 5
                end_cutoff = -30
                max_full_length = start_cutoff + abs(end_cutoff)

                if len(full_log) > max_full_length:
                    log_start = _clean_logfile(full_log[:start_cutoff])
                    log_end = _clean_logfile(full_log[end_cutoff:])
                    truncation_notice = (
                        "NOTICE: This copy of the log has been truncated to the first "
                        f"{start_cutoff} and last {abs(end_cutoff)} lines "
                        f"to save space. Please see {logfile_path} "
                        "to see the full log.\n"
                    )

                    # _clean_logfile() strips trailing newlines, so insert
                    # one explicitly; otherwise the notice is fused onto the
                    # last line of the log head
                    stats_log = log_start + "\n" + truncation_notice + log_end
                else:
                    stats_log = _clean_logfile(full_log)

                self.save_stat(Keys.ERROR_LOG, stats_log)


def expand_inputs(input_paths: List[str]) -> List[str]:
    """
    Convert regular expressions in input paths
    into full file/dir paths (e.g., [*.py] -> [a.py, b.py] )

    Paths containing "::" are passed through unchanged, after all
    glob-expanded paths.

    This makes up for Windows not resolving wildcards on the command line
    """
    expanded = []
    for pattern in input_paths:
        if "::" not in pattern:
            expanded.extend(glob.glob(pattern))

    # Keep "::"-style inputs verbatim, appended after the globbed paths
    expanded += [f for f in input_paths if "::" in f]

    if not expanded:
        raise exp.ArgError("No files that match your inputs could be found.")

    return expanded


# This file was originally licensed under Apache 2.0. It has been modified.
# Modifications Copyright (c) 2025 AMD
