# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from typing_extensions import overload, override
from typing import Dict, Union

from azure.ai.evaluation._common._experimental import experimental
from azure.ai.evaluation._common.constants import EvaluationMetrics
from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase


# cspell:ignore ssrf, vuln
@experimental
class CodeVulnerabilityEvaluator(RaiServiceEvaluatorBase[Union[str, bool]]):
    """
    Evaluates code vulnerability for a given query and response for a single-turn evaluation only,
    where query represents the user query or code before the completion, and response represents the code recommended by the assistant.

    The code vulnerability evaluation checks for vulnerabilities in the following coding languages:

    - Python
    - Java
    - C++
    - C#
    - Go
    - Javascript
    - SQL

    The code vulnerability evaluation identifies the following vulnerabilities:

    - path-injection
    - sql-injection
    - code-injection
    - stack-trace-exposure
    - incomplete-url-substring-sanitization
    - flask-debug
    - clear-text-logging-sensitive-data
    - incomplete-hostname-regexp
    - server-side-unvalidated-url-redirection
    - weak-cryptographic-algorithm
    - full-ssrf
    - bind-socket-all-network-interfaces
    - client-side-unvalidated-url-redirection
    - likely-bugs
    - reflected-xss
    - clear-text-storage-sensitive-data
    - tarslip
    - hardcoded-credentials
    - insecure-randomness

    :param credential: The credential for connecting to Azure AI project. Required
    :type credential: ~azure.core.credentials.TokenCredential
    :param azure_ai_project: The Azure AI project, which can either be a string representing the project endpoint
        or an instance of AzureAIProject. It contains subscription id, resource group, and project name.
    :type azure_ai_project: Union[str, ~azure.ai.evaluation.AzureAIProject]
    :param kwargs: Additional arguments to pass to the evaluator.
    :type kwargs: Any

    .. note::

        If this evaluator is supplied to the `evaluate` function, the metric
        for the code vulnerability will be "code_vulnerability_label".
    """

    # Evaluator identifier, experimental and to be used only with evaluation in cloud.
    id = "azureai://built-in/evaluators/code_vulnerability"
    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
    # "query" may be omitted by callers; the RAI service still evaluates the response alone.
    _OPTIONAL_PARAMS = ["query"]

    @override
    def __init__(
        self,
        credential,
        azure_ai_project,
        **kwargs,
    ):
        # Evaluate the query alongside the response by default; callers may
        # explicitly pass evaluate_query=False to override.
        kwargs.setdefault("evaluate_query", True)

        super().__init__(
            eval_metric=EvaluationMetrics.CODE_VULNERABILITY,
            azure_ai_project=azure_ai_project,
            credential=credential,
            **kwargs,
        )

    @overload
    def __call__(
        self,
        *,
        query: str,
        response: str,
    ) -> Dict[str, Union[str, bool]]:
        """Evaluate a given query/response pair for code vulnerability

        :keyword query: The query to be evaluated.
        :paramtype query: str
        :keyword response: The response to be evaluated.
        :paramtype response: str
        :return: The code vulnerability label.
        :rtype: Dict[str, Union[str, bool]]
        """

    @override
    def __call__(  # pylint: disable=docstring-missing-param
        self,
        *args,
        **kwargs,
    ):
        """Evaluate code vulnerability. Accepts query and response for a single-turn evaluation only.

        :keyword query: The query to be evaluated.
        :paramtype query: Optional[str]
        :keyword response: The response to be evaluated.
        :paramtype response: Optional[str]
        :rtype: Dict[str, Union[str, bool]]
        """

        # All real work (input validation, RAI service call, result parsing)
        # happens in RaiServiceEvaluatorBase.__call__.
        return super().__call__(*args, **kwargs)
