#  -*- coding: utf-8 -*-
#  Copyright (c) 2025-2025 Huawei Technologies Co., Ltd.
#  #
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#  #
#  http://www.apache.org/licenses/LICENSE-2.0
#  #
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.

# Public API of this package. Declared before the imports per PEP 8's
# module-dunder placement convention; the two groups are concatenated so
# the resulting list is identical to a single flat literal.
__all__ = [
    # Fake-quantization module and cache classes.
    "AutoFakeQuantLinear",
    "AutoFakeQuantDynamicCache",
    "W8A8StaticFakeQuantLinear",
    "W8A8DynamicFakeQuantLinear",
    "FakeQuantDynamicCache",
] + [
    # Predefined int8 quantization configuration constants.
    "int8_per_tensor_sym",
    "int8_per_channel_sym",
    "int8_per_channel_asym",
    "int8_per_token_sym",
    "int8_per_group_sym",
    "int8_per_tensor_asym",
    "int8_per_token_asym",
]

# Re-export the public implementation classes and quantization-config
# constants from the package's submodules. Module import order is kept
# as-is in case any submodule has import-time side effects.
from .auto import AutoFakeQuantLinear, AutoFakeQuantDynamicCache
# PEP 8: parenthesized multi-line import instead of backslash continuation.
from .const import (
    int8_per_tensor_sym,
    int8_per_channel_sym,
    int8_per_token_sym,
    int8_per_group_sym,
    int8_per_tensor_asym,
    int8_per_token_asym,
    int8_per_channel_asym,
)
from .w8a8 import W8A8DynamicFakeQuantLinear, W8A8StaticFakeQuantLinear
from .attention import FakeQuantDynamicCache