# Copyright (C) 2023, Advanced Micro Devices, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause

from typing import Optional
from typing import Type
from typing import Union

import torch
from torch import Tensor
from torch.nn import Linear
from torch.nn.functional import linear

from brevitas.function.ops import max_int
from brevitas.function.ops_ste import ceil_ste
from brevitas.inject.defaults import Int8WeightPerTensorFloat
from brevitas.quant_tensor import QuantTensor

from .quant_layer import ActQuantType
from .quant_layer import BiasQuantType
from .quant_layer import QuantWeightBiasInputOutputLayer as QuantWBIOL
from .quant_layer import WeightQuantType

__all__ = ['QuantLinear']


class QuantLinear(QuantWBIOL, Linear):
    """Quantized drop-in replacement for ``torch.nn.Linear``.

    Combines the standard ``Linear`` layer with Brevitas' weight / bias /
    input / output quantization machinery inherited from ``QuantWBIOL``.
    By default only the weights are quantized (8-bit, per-tensor, float
    scale); every other quantizer is disabled unless supplied.
    """

    def __init__(
            self,
            in_features: int,
            out_features: int,
            bias: Optional[bool] = True,
            weight_quant: Optional[WeightQuantType] = Int8WeightPerTensorFloat,
            bias_quant: Optional[BiasQuantType] = None,
            input_quant: Optional[ActQuantType] = None,
            output_quant: Optional[ActQuantType] = None,
            return_quant_tensor: bool = False,
            device: Optional[torch.device] = None,
            dtype: Optional[torch.dtype] = None,
            **kwargs) -> None:
        """Initialize the float ``Linear`` state first, then attach quantizers.

        Extra ``kwargs`` are forwarded to ``QuantWBIOL`` so quantizer
        hyper-parameters can be overridden by keyword.
        """
        Linear.__init__(self, in_features, out_features, bias, device=device, dtype=dtype)
        QuantWBIOL.__init__(
            self,
            weight_quant=weight_quant,
            bias_quant=bias_quant,
            input_quant=input_quant,
            output_quant=output_quant,
            return_quant_tensor=return_quant_tensor,
            **kwargs)

    @property
    def per_elem_ops(self):
        # One multiply + one accumulate per input feature, per output element.
        return 2 * self.in_features

    @property
    def output_channel_dim(self):
        # Weight tensor is laid out (out_features, in_features).
        return 0

    @property
    def out_channels(self):
        # Alias ``out_features`` under the generic channel-based name.
        return self.out_features

    @property
    def channelwise_separable(self) -> bool:
        return False

    def forward(self, input: Union[Tensor, QuantTensor]) -> Union[Tensor, QuantTensor]:
        """Dispatch through the shared quantized forward pipeline."""
        return self.forward_impl(input)

    def inner_forward_impl(self, x: Tensor, quant_weight: Tensor, quant_bias: Optional[Tensor]):
        """Apply the affine transform with already-quantized parameters."""
        return linear(x, quant_weight, quant_bias)

    def quant_output_scale_impl(
            self, inp: Tensor, quant_input_scale: Tensor, quant_weight_scale: Tensor):
        """Derive the output scale by combining input and weight scales.

        Scalar (0-dim) scales are first expanded to all-ones shapes matching
        the rank of the input / weight so that ``linear`` can broadcast them.
        """
        if not quant_input_scale.shape:
            quant_input_scale = quant_input_scale.reshape([1] * inp.dim())
        if not quant_weight_scale.shape:
            quant_weight_scale = quant_weight_scale.reshape([1] * self.weight.dim())
        return linear(quant_input_scale, quant_weight_scale)