# Copyright (c) 2023 Huawei Technologies Co., Ltd. All rights reserved.
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import inspect
import unittest
import pytest
import shutil
import subprocess
from pathlib import Path
from typing import List, Mapping

from openmind import (
    is_mindformers_available,
    is_ms_available,
    is_torch_available,
    is_transformers_available,
    is_diffusers_available,
    is_mindnlp_available,
    is_mindone_available,
    is_lmeval_available,
    is_lmdeploy_available,
)
from openmind.utils.generic import strtobool
from openmind.utils.import_utils import (
    is_decord_available,
    is_detectron2_available,
    is_pytesseract_available,
    is_timm_available,
    is_vision_available,
)

if is_torch_available():
    import torch


class CustomTestCase(unittest.TestCase):
    """Base TestCase that provides auto-removed temporary directories for tests."""

    def setUp(self):
        # get_auto_remove_tmp_dir feature: dirs registered here are wiped in tearDown
        self.teardown_tmp_dirs = []

        self._test_file_path = inspect.getfile(self.__class__)
        self._tests_dir = Path(self._test_file_path).resolve().parents[0]

    def tearDown(self):
        # get_auto_remove_tmp_dir feature: remove every registered temp dir
        for registered in self.teardown_tmp_dirs:
            shutil.rmtree(registered, ignore_errors=True)
        self.teardown_tmp_dirs = []

    # Copied from https://github.com/huggingface/transformers/blob/main/src/transformers/testing_utils.py#L1917
    def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
        """
        Return a temporary directory path, optionally cleared before use and registered
        for removal in tearDown.

        Args:
            tmp_dir: optional caller-chosen path (debug mode); must be relative,
                i.e. start with `./`. When omitted, a unique directory is created
                with `tempfile.mkdtemp()`.
            before: clear the directory before the test; defaults to True.
            after: remove the directory after the test; defaults to False for a
                caller-chosen path (left intact for inspection) and True for an
                auto-generated one.

        Raises:
            ValueError: if a caller-chosen `tmp_dir` is not a relative `./` path.
        """
        if tmp_dir is None:
            # auto-generated unique tmp path (not debug mode); we require a dir that:
            # 1. is empty before the test (it will be empty in this situation anyway)
            # 2. gets fully removed after the test
            before = True if before is None else before
            after = True if after is None else after

            # unique tmp dir — always empty, regardless of `before`
            tmp_dir = tempfile.mkdtemp()
        else:
            # a custom path most likely indicates debug mode: an easily locatable dir that
            # 1. gets cleared out before the test (if it already exists)
            # 2. is left intact after the test
            before = True if before is None else before
            after = False if after is None else after

            resolved = Path(tmp_dir).resolve()

            # to avoid nuking parts of the filesystem, only relative paths are allowed
            if not tmp_dir.startswith("./"):
                raise ValueError(f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`")

            # ensure the dir is empty to start with
            if before is True and resolved.exists():
                shutil.rmtree(tmp_dir, ignore_errors=True)

            resolved.mkdir(parents=True, exist_ok=True)

        if after is True:
            # register for deletion in tearDown
            self.teardown_tmp_dirs.append(tmp_dir)

        return tmp_dir


class ANY:
    """Equality helper: compares equal to any object of the given type(s)."""

    def __init__(self, *_types):
        # the accepted types, kept as a tuple for isinstance()
        self._types = _types

    def __eq__(self, other):
        return isinstance(other, self._types)

    def __repr__(self):
        names = ", ".join(t.__name__ for t in self._types)
        return f"ANY({names})"


# Parse RUN_SLOW as a boolean flag instead of using the raw string: previously any
# non-empty value (including "0", "no" or "false") enabled slow tests.
try:
    _run_slow_tests = strtobool(os.environ.get("RUN_SLOW", "0"))
except ValueError:
    # keep the legacy behavior for unrecognized non-empty values: run slow tests
    _run_slow_tests = True


def mockenv(**kwargs):
    """
    Return a context manager/decorator that patches `os.environ` with the given
    key/value pairs for the duration of the test.

    `import unittest` alone does not load the `unittest.mock` submodule, so import it
    explicitly here instead of relying on another module having imported it already.
    """
    from unittest import mock

    return mock.patch.dict(os.environ, kwargs)


def slow(func):
    """
    Decorator marking a test as slow.

    Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
    """

    # fixed skip message; this decorator takes no custom reason
    reason = "Skipping slow tests because RUN_SLOW is not set"

    return pytest.mark.skipif(not _run_slow_tests, reason=reason)(func)


def parse_flag_from_env(key, default=False):
    """
    Read the environment variable `key` as a boolean flag.

    Returns `default` when the variable is unset; otherwise converts the value with
    `strtobool`, raising ValueError for unrecognized values.
    """
    raw = os.environ.get(key)
    if raw is None:
        # KEY isn't set, fall back to `default`.
        return default
    # KEY is set, convert it to True or False.
    try:
        return strtobool(raw)
    except ValueError as e:
        # More values are supported, but let's keep the message simple.
        raise ValueError(f"If set, {key} must be yes or no.") from e


# Pipeline tests run by default; set RUN_PIPELINE_TESTS to a falsy value (no/0/false) to skip them.
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)


def is_pipeline_test(test_case):
    """
    Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
    skipped.
    """
    if not _run_pipeline_tests:
        return unittest.skip("test is pipeline test")(test_case)
    try:
        import pytest  # We don't need a hard dependency on pytest in the main library
    except ImportError:
        return test_case
    return pytest.mark.is_pipeline_test()(test_case)


def require_torch(test_case):
    """Decorator skipping the test when PyTorch isn't installed."""
    marker = unittest.skipUnless(is_torch_available(), "test requires PyTorch")
    return marker(test_case)


def require_lm_eval(test_case):
    """Decorator skipping the test when lm_eval isn't installed."""
    marker = unittest.skipUnless(is_lmeval_available(), "test requires lm_eval")
    return marker(test_case)


def require_vision(test_case):
    """
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
    """
    return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)


def require_diffusers(test_case):
    """Decorator skipping the test when diffusers isn't installed."""
    marker = unittest.skipUnless(is_diffusers_available(), "test requires diffusers")
    return marker(test_case)


def require_mindone(test_case):
    """
    Decorator skipping the test when mindone isn't installed, or when PyTorch is
    installed (the two backends are mutually exclusive here).
    """
    usable = is_mindone_available() and not is_torch_available()
    return unittest.skipUnless(usable, "test requires mindone")(test_case)


def require_mindnlp(test_case):
    """
    Decorator skipping the test when mindnlp isn't installed, or when PyTorch is
    installed (the two backends are mutually exclusive here).
    """
    usable = is_mindnlp_available() and not is_torch_available()
    return unittest.skipUnless(usable, "test requires mindnlp")(test_case)


def require_detectron2(test_case):
    """Decorator skipping the test when detectron2 isn't installed."""
    marker = unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")
    return marker(test_case)


def require_pytesseract(test_case):
    """Decorator skipping the test when PyTesseract isn't installed."""
    marker = unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")
    return marker(test_case)


def require_decord(test_case):
    """Decorator skipping the test when decord isn't installed."""
    marker = unittest.skipUnless(is_decord_available(), "test requires decord")
    return marker(test_case)


def require_timm(test_case):
    """Decorator skipping the test when Timm isn't installed."""
    marker = unittest.skipUnless(is_timm_available(), "test requires Timm")
    return marker(test_case)


def require_mindspore(test_case):
    """Decorator skipping the test when MindSpore isn't installed."""
    marker = unittest.skipUnless(is_ms_available(), "test requires MindSpore")
    return marker(test_case)


def require_transformers(test_case):
    """
    Decorator skipping the test when Transformers isn't installed, or when MindSpore
    is installed (the two backends are mutually exclusive here).
    """
    usable = is_transformers_available() and not is_ms_available()
    return unittest.skipUnless(usable, "test requires Transformers")(test_case)


def require_mindformers(test_case):
    """
    Decorator skipping the test when Mindformers isn't installed, or when PyTorch
    is installed (the two backends are mutually exclusive here).
    """
    usable = is_mindformers_available() and not is_torch_available()
    return unittest.skipUnless(usable, "test requires Mindformers")(test_case)


def require_lmdeploy(test_case):
    """Decorator skipping the test when LMDeploy isn't installed."""
    marker = unittest.skipUnless(is_lmdeploy_available(), "test requires LMDeploy")
    return marker(test_case)


def nested_simplify(obj, decimals=3):
    """
    Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
    within tests.

    Args:
        obj: nested structure of lists/tuples/mappings containing strings, ints,
            floats, None, numpy scalars/arrays or (if available) torch tensors.
        decimals: number of decimal places floats are rounded to (default 3).

    Raises:
        Exception: for unsupported object types.
    """
    import numpy as np

    if isinstance(obj, list):
        return [nested_simplify(item, decimals) for item in obj]
    if isinstance(obj, tuple):
        return tuple(nested_simplify(item, decimals) for item in obj)
    elif isinstance(obj, np.ndarray):
        # propagate `decimals` (it was previously dropped here, silently rounding to 3)
        return nested_simplify(obj.tolist(), decimals)
    elif isinstance(obj, Mapping):
        return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
    elif isinstance(obj, (str, int, np.int64)):
        return obj
    elif obj is None:
        return obj
    elif isinstance(obj, float):
        return round(obj, decimals)
    elif isinstance(obj, (np.int32, np.float32)):
        return nested_simplify(obj.item(), decimals)
    # torch branch last, so stdlib/numpy inputs never need to probe torch availability
    elif is_torch_available() and isinstance(obj, torch.Tensor):
        return nested_simplify(obj.tolist(), decimals)
    else:
        raise Exception(f"Not supported: {type(obj)}")


def run_command(command: List[str], return_stdout=False, env=None):
    """
    Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occured while running `command`.

    Args:
        command: argv list; `pathlib.Path` entries are converted to strings.
        return_stdout: when True, return the command's stdout (decoded as UTF-8);
            otherwise return None.
        env: environment mapping for the child process; defaults to a copy of
            `os.environ`.

    Raises:
        Exception: when the command exits with a non-zero status, with the combined
            stdout/stderr included in the message.
    """
    # Cast every Path in `command` to a string, without mutating the caller's list.
    command = [str(part) if isinstance(part, Path) else part for part in command]
    if env is None:
        env = os.environ.copy()
    try:
        # keep only the line that can raise inside the `try`
        output = subprocess.check_output(command, stderr=subprocess.STDOUT, env=env, shell=False)
    except subprocess.CalledProcessError as e:
        raise Exception(f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
    if return_stdout:
        if hasattr(output, "decode"):
            output = output.decode("utf-8")
        return output
