# SPDX-License-Identifier: Apache-2.0
# Copyright © 2022-2024 Arm Technology (China) Co. Ltd.

from AIPUBuilder.Optimizer.utils import *
from AIPUBuilder.Optimizer.framework import *

import torch


@quant_register(OpType.BNLL)
def bnll_quantize(self, *args):
    """Quantize the BNLL op by building an integer lookup table.

    Chooses linear quantization parameters for the output tensor, then
    tabulates y = softplus(x) (computed in a numerically stable split form)
    over the input's quantized range and stores the quantized table in
    self.constants["lut"] for the quantized forward pass.
    """
    q_mode_activation = self.attrs["q_mode_activation"]
    if QuantMode.is_per_channel(q_mode_activation):
        OPT_FATAL("Currently not support per-channel quantization")
    q_bits_activation = self.attrs["q_bits_activation"]

    inp = self.inputs[0]
    out = self.outputs[0]
    out.qbits = q_bits_activation
    # softplus output is non-negative, so default to unsigned unless a
    # signed int dtype is explicitly forced on this node.
    out_sign = self.force_dtype_int
    out.dtype = bits2dtype(out.qbits, is_signed=out_sign)
    dev = inp.device
    out.scale, out.zerop, out.qmin, out.qmax, out.dtype = get_linear_quant_params_from_tensor(
        out, q_mode_activation, out.qbits, out_sign)
    # LUT size is capped by 'lut_items_in_bits' to bound table memory.
    lsteps = 2 ** min(inp.qbits, int(self.get_attrs('lut_items_in_bits')))
    lut = linear_dequantize(torch.linspace(inp.qmin, inp.qmax, steps=lsteps, device=dev), inp.scale, inp.zerop)
    # Numerically stable softplus: x + ln(1+exp(-x)) for x > 0 avoids
    # overflow of exp(x); ln(1+exp(x)) is safe for x <= 0.
    pos = lut + torch.log(1 + (-lut).double().exp())
    neg = torch.log(1 + lut.double().exp())
    condition = (lut > 0)
    y = torch.where(condition, pos, neg)
    lut = linear_quantize_clip(y, out.scale, out.zerop, out.qmin, out.qmax)
    self.constants["lut"] = PyTensor(self.name+"/bnll_lut", lut.cpu().numpy().astype(dtype2nptype(out.dtype)))
    out.qinvariant = False
#############################################################
# Algorithm description (numerically stable softplus):
#   if x > 0:
#     y = x + ln(1 + exp(-x))
#   else:
#     y = ln(1 + exp(x))


@op_register(OpType.BNLL)
def bnll(self, *args):
    """Forward pass of BNLL: y = softplus(x) = ln(1 + exp(x)).

    Float path evaluates the stable split form directly; quantized path
    indexes the precomputed integer LUT built during quantization.
    """
    inp = self.inputs[0]
    out = self.outputs[0]
    if not self.quantized:
        x = inp.betensor
        # Stable split: for x > 0 use x + ln(1+exp(-x)); otherwise ln(1+exp(x)).
        upper = x + torch.log(1 + (-x).double().exp())
        lower = torch.log(1 + x.double().exp())
        out.betensor = torch.where(x > 0, upper, lower).float()
    else:
        lut_tensor = self.constants["lut"]
        # Shift quantized input into [0, qmax-qmin] so it indexes the table.
        indices = (inp.betensor - inp.qmin).reshape(-1)
        flat = lookup_lut_powerof2(indices, lut_tensor.betensor, inp.qbits, False,
                                   dtype2bits(lut_tensor.dtype), is_signed(lut_tensor.dtype))
        out.betensor = flat.reshape(inp.betensor.shape)
    return out.betensor
