# type: ignore
from __future__ import annotations
import openvino._pyopenvino
import typing
"""
openvino.properties.intel_npu submodule that simulates ov::intel_npu
"""
__all__ = ['bypass_umd_caching', 'compilation_mode_params', 'compiler_dynamic_quantization', 'compiler_version', 'defer_weights_load', 'device_alloc_mem_size', 'device_total_mem_size', 'driver_version', 'max_tiles', 'qdq_optimization', 'run_inferences_sequentially', 'tiles', 'turbo']
@typing.overload
def bypass_umd_caching() -> str:
    """Return the property's string key (no value attached).

    Semantics of the key (presumably toggling UMD model caching on the
    NPU driver) are defined in the C++ binding — not visible here.
    """
    ...
@typing.overload
def bypass_umd_caching(arg0: bool) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Pair the property key with *arg0* wrapped as ``OVAny``.

    :param arg0: boolean value to associate with the property key.
    :return: ``(key, OVAny(arg0))`` tuple, as produced by the binding.
    """
    ...
@typing.overload
def compilation_mode_params() -> str:
    """Return the property's string key (no value attached)."""
    ...
@typing.overload
def compilation_mode_params(arg0: str) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Pair the property key with *arg0* wrapped as ``OVAny``.

    :param arg0: string value (presumably compiler parameter text —
        exact format defined by the NPU plugin, not visible here).
    :return: ``(key, OVAny(arg0))`` tuple.
    """
    ...
@typing.overload
def compiler_dynamic_quantization() -> str:
    """Return the property's string key (no value attached)."""
    ...
@typing.overload
def compiler_dynamic_quantization(arg0: bool) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Pair the property key with *arg0* wrapped as ``OVAny``.

    :param arg0: boolean value to associate with the property key.
    :return: ``(key, OVAny(arg0))`` tuple.
    """
    ...
def compiler_version() -> str:
    """Return the property's string key.

    No value-taking overload exists, so this property is read-only.
    """
    ...
@typing.overload
def defer_weights_load() -> str:
    """Return the property's string key (no value attached)."""
    ...
@typing.overload
def defer_weights_load(arg0: bool) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Pair the property key with *arg0* wrapped as ``OVAny``.

    :param arg0: boolean value to associate with the property key.
    :return: ``(key, OVAny(arg0))`` tuple.
    """
    ...
def device_alloc_mem_size() -> str:
    """Return the property's string key.

    No value-taking overload exists, so this property is read-only.
    """
    ...
def device_total_mem_size() -> str:
    """Return the property's string key.

    No value-taking overload exists, so this property is read-only.
    """
    ...
def driver_version() -> str:
    """Return the property's string key.

    No value-taking overload exists, so this property is read-only.
    """
    ...
@typing.overload
def max_tiles() -> str:
    """Return the property's string key (no value attached)."""
    ...
@typing.overload
def max_tiles(arg0: int) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Pair the property key with *arg0* wrapped as ``OVAny``.

    :param arg0: integer value to associate with the property key
        (valid range defined by the NPU plugin, not visible here).
    :return: ``(key, OVAny(arg0))`` tuple.
    """
    ...
@typing.overload
def qdq_optimization() -> str:
    """Return the property's string key (no value attached)."""
    ...
@typing.overload
def qdq_optimization(arg0: bool) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Pair the property key with *arg0* wrapped as ``OVAny``.

    :param arg0: boolean value to associate with the property key.
    :return: ``(key, OVAny(arg0))`` tuple.
    """
    ...
@typing.overload
def run_inferences_sequentially() -> str:
    """Return the property's string key (no value attached)."""
    ...
@typing.overload
def run_inferences_sequentially(arg0: bool) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Pair the property key with *arg0* wrapped as ``OVAny``.

    :param arg0: boolean value to associate with the property key.
    :return: ``(key, OVAny(arg0))`` tuple.
    """
    ...
@typing.overload
def tiles() -> str:
    """Return the property's string key (no value attached)."""
    ...
@typing.overload
def tiles(arg0: int) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Pair the property key with *arg0* wrapped as ``OVAny``.

    :param arg0: integer value to associate with the property key
        (valid range defined by the NPU plugin, not visible here).
    :return: ``(key, OVAny(arg0))`` tuple.
    """
    ...
@typing.overload
def turbo() -> str:
    """Return the property's string key (no value attached)."""
    ...
@typing.overload
def turbo(arg0: bool) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Pair the property key with *arg0* wrapped as ``OVAny``.

    :param arg0: boolean value to associate with the property key.
    :return: ``(key, OVAny(arg0))`` tuple.
    """
    ...
