/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "common/math_util.h"

#include <cstdint>
#include <cstring>

namespace fe {
#define FP16_EXP_BIAS (15)
#define FP32_EXP_BIAS (127)
#define FP16_MAN_LEN (10)
#define FP32_MAN_LEN (23)
#define FP32_SIGN_INDEX (31)
#define FP16_MAN_MASK (0x03FF)
#define FP16_MAN_HIDE_BIT (0x0400)
#define FP16_MAX_EXP (0x001F)
#define FP32_MAX_MAN (0x7FFFFF)

/**
 * @brief Reinterpret a raw fp16 (IEEE-754 binary16) bit pattern as a fp32 value.
 * @param intVal raw half-precision bits: 1 sign bit, 5 exponent bits, 10 mantissa bits
 * @return the numerically equivalent single-precision float
 *
 * Subnormal fp16 inputs are normalized (mantissa shifted up, exponent adjusted)
 * before re-biasing, so they convert exactly — fp32 has enough exponent range.
 * NOTE(review): an fp16 exponent of 0x1F (Inf/NaN) is treated as an ordinary
 * exponent and does NOT map to fp32 Inf/NaN; this preserves the original
 * behavior — confirm callers never pass Inf/NaN.
 */
float Uint16ToFloat(const uint16_t &intVal) {
  // Field-layout constants for binary16 and binary32 (replaces file macros).
  constexpr int32_t kFp16ExpBias = 15;
  constexpr int32_t kFp32ExpBias = 127;
  constexpr uint32_t kFp16ManLen = 10;
  constexpr uint32_t kFp32ManLen = 23;
  constexpr uint32_t kFp32SignIndex = 31;
  constexpr uint16_t kFp16ManMask = 0x03FF;
  constexpr uint16_t kFp16ManHideBit = 0x0400;
  constexpr uint16_t kFp16MaxExp = 0x001F;
  constexpr uint32_t kFp32MaxMan = 0x7FFFFF;

  uint16_t hfSign = (intVal >> 15) & 1;
  int16_t hfExp = (intVal >> kFp16ManLen) & kFp16MaxExp;
  // Restore the implicit leading 1 for normalized values (exponent > 0);
  // subnormals keep only the 10 stored mantissa bits.
  uint16_t hfMan =
      (intVal & kFp16ManMask) | (hfExp > 0 ? kFp16ManHideBit : static_cast<uint16_t>(0));
  if (hfExp == 0) {
    hfExp = 1;  // subnormal: effective exponent is 1 - bias
  }

  // Normalize a subnormal mantissa: shift left until the hidden bit appears,
  // decrementing the exponent to compensate.
  while (hfMan && !(hfMan & kFp16ManHideBit)) {
    hfMan <<= 1;
    hfExp--;
  }

  uint32_t sRet = hfSign;
  uint32_t eRet = 0;
  uint32_t mRet = 0;
  if (hfMan != 0) {
    // Re-bias the exponent and widen the mantissa from 10 to 23 bits.
    eRet = static_cast<uint32_t>(hfExp - kFp16ExpBias + kFp32ExpBias);
    mRet = static_cast<uint32_t>(hfMan & kFp16ManMask) << (kFp32ManLen - kFp16ManLen);
  }
  uint32_t fVal = (sRet << kFp32SignIndex) | (eRet << kFp32ManLen) | (mRet & kFp32MaxMan);

  // memcpy is the well-defined way to reinterpret object bits; the previous
  // *((float *)&fVal) cast violated strict aliasing (undefined behavior).
  float ret;
  std::memcpy(&ret, &fVal, sizeof(ret));
  return ret;
}

/**
 * @brief In-place add of scalar |alpha| to each of the first |n| floats
 *        starting at |xx| (the reference is treated as the head of an array
 *        of at least n elements).
 * @param n     element count
 * @param alpha scalar addend
 * @param xx    first element of the destination buffer
 * @return SUCCESS on completion; FE_FLOAT_ADDCHECK / FE_CHECK_NOTNULL
 *         presumably return early on failure — TODO confirm macro semantics.
 */
Status NnAddScalar(const int32_t n, const float alpha, float &xx) {
  float *x = &xx;
  FE_CHECK_NOTNULL(x);  // defensive: x comes from a reference

  int32_t i = 0;
  while (i < n) {
    FE_FLOAT_ADDCHECK(x[i], alpha);  // overflow guard before the add
    x[i] += alpha;
    ++i;
  }

  return SUCCESS;
}

/**
 * @brief Element-wise power: y[i] = a[i] ^ b for the first |n| elements.
 * @param n  element count
 * @param aa first element of the base array
 * @param b  scalar exponent
 * @param yy first element of the output array
 * @return SUCCESS on completion
 */
Status NnPowx(const int32_t n, const float &aa, const float b, float &yy) {
  const float *a = &aa;
  float *y = &yy;
  FE_CHECK_NOTNULL(a);
  FE_CHECK_NOTNULL(y);

  int32_t idx = 0;
  while (idx < n) {
    y[idx] = pow(a[idx], b);  // double-precision pow, narrowed on store
    ++idx;
  }

  return SUCCESS;
}

/**
 * @brief Element-wise division: y[i] = a[i] / b[i] for the first |n| elements.
 * @param n  element count
 * @param aa first element of the dividend array
 * @param bb first element of the divisor array
 * @param yy first element of the output array
 * @return SUCCESS, or FAILED when any divisor magnitude is below the
 *         near-zero threshold
 */
Status NnDiv(const int32_t n, const float &aa, const float &bb, float &yy) {
  const float *a = &aa;
  const float *b = &bb;
  float *y = &yy;
  FE_CHECK_NOTNULL(a);
  FE_CHECK_NOTNULL(b);
  FE_CHECK_NOTNULL(y);
  // Divisors with magnitude below this threshold are rejected as zero.
  // 'f' suffix and constexpr avoid the silent double->float narrowing of the
  // original mutable local (same value: (float)1e-6 == 1e-6f).
  constexpr float div_ep_s = 1e-6f;

  for (int32_t i = 0; i < n; ++i) {
    if (fabs(b[i]) < div_ep_s) {
      FE_LOGE("divide num is zero error!");
      return FAILED;
    }
    y[i] = a[i] / b[i];
  }

  return SUCCESS;
}

/**
 * @brief Maximum absolute value of the first |n| elements starting at |xx|.
 * @param n  element count
 * @param xx first element of the input array
 * @param y  output: max(|x[i]|); 0 when n <= 0
 * @return SUCCESS on completion
 */
Status NnMax(const int32_t n, const float &xx, float &y) {
  const float *x = &xx;
  FE_CHECK_NOTNULL(x);
  y = 0;

  for (int32_t i = 0; i < n; ++i) {
    // Evaluate fabs once per element; the original ternary computed it twice.
    // fabs on a float is exact, so the narrowing cast cannot change the value.
    const float absVal = static_cast<float>(fabs(x[i]));
    if (absVal > y) {
      y = absVal;
    }
  }

  return SUCCESS;
}

/**
 * @brief Element-wise product: y[i] = a[i] * b[i] for the first |n| elements.
 * @param n  element count
 * @param aa first element of the left operand array
 * @param bb first element of the right operand array
 * @param yy first element of the output array
 * @return SUCCESS on completion
 */
Status NnMul(const int32_t n, const float &aa, const float &bb, float &yy) {
  const float *a = &aa;
  const float *b = &bb;
  float *y = &yy;
  FE_CHECK_NOTNULL(a);
  FE_CHECK_NOTNULL(b);
  FE_CHECK_NOTNULL(y);

  int32_t i = 0;
  while (i < n) {
    FE_FLOAT_MULCHECK(a[i], b[i]);  // overflow guard before the multiply
    y[i] = a[i] * b[i];
    ++i;
  }

  return SUCCESS;
}

/**
 * @brief Scale by a scalar: y[i] = a[i] * b for the first |n| elements.
 * @param n  element count
 * @param aa first element of the input array
 * @param b  scalar multiplier
 * @param yy first element of the output array
 * @return SUCCESS on completion
 */
Status NnMulScalar(const int32_t n, const float &aa, float b, float &yy) {
  const float *a = &aa;
  float *y = &yy;
  // Use FE_CHECK_NOTNULL like the sibling NnMul/NnAdd helpers instead of the
  // hand-rolled FE_CHECK; the pointers come from references, so the check is
  // purely defensive and cannot fire in well-formed code.
  FE_CHECK_NOTNULL(a);
  FE_CHECK_NOTNULL(y);

  for (int32_t i = 0; i < n; ++i) {
    FE_FLOAT_MULCHECK(a[i], b);
    y[i] = a[i] * b;
  }

  return SUCCESS;
}

/**
 * @brief Element-wise sum: y[i] = a[i] + b[i] for the first |n| elements.
 * @param n  element count
 * @param aa first element of the left operand array
 * @param bb first element of the right operand array
 * @param yy first element of the output array
 * @return SUCCESS on completion
 */
Status NnAdd(const int32_t n, const float &aa, const float &bb, float &yy) {
  const float *input_a = &aa;
  const float *input_b = &bb;
  float *y = &yy;
  FE_CHECK_NOTNULL(input_a);
  FE_CHECK_NOTNULL(input_b);
  FE_CHECK_NOTNULL(y);

  int32_t i = 0;
  while (i < n) {
    FE_FLOAT_ADDCHECK(input_a[i], input_b[i]);  // overflow guard before the add
    y[i] = input_a[i] + input_b[i];
    ++i;
  }

  return SUCCESS;
}

/**
 * @brief Uniform scale: y[i] = alpha * x[i] for the first |n| elements.
 * @param n     element count
 * @param alpha scalar factor
 * @param xx    first element of the input array
 * @param yy    first element of the output array
 * @return SUCCESS on completion
 */
Status NnScale(const int32_t n, const float alpha, const float &xx, float &yy) {
  const float *x = &xx;
  float *y = &yy;
  FE_CHECK_NOTNULL(x);
  FE_CHECK_NOTNULL(y);

  int32_t i = 0;
  while (i < n) {
    FE_FLOAT_MULCHECK(alpha, x[i]);  // overflow guard before the multiply
    y[i] = alpha * x[i];
    ++i;
  }

  return SUCCESS;
}

/**
 * @brief Per-element scale: y[i] = alpha[i] * x[i] for the first |n| elements.
 * @param n     element count
 * @param alpha pointer to an array of at least n scale factors (must not be null)
 * @param xx    first element of the input array
 * @param yy    first element of the output array
 * @return SUCCESS on completion
 */
Status NnScale(const int32_t n, const float *alpha, const float &xx, float &yy) {
  const float *x = &xx;
  float *y = &yy;
  // alpha is a genuine caller-supplied pointer (unlike x/y, which come from
  // references) and was previously dereferenced without any check.
  FE_CHECK_NOTNULL(alpha);
  FE_CHECK_NOTNULL(x);
  FE_CHECK_NOTNULL(y);

  for (int32_t i = 0; i < n; ++i) {
    FE_FLOAT_MULCHECK(alpha[i], x[i]);
    y[i] = alpha[i] * x[i];
  }

  return SUCCESS;
}

/**
 * @brief Debug-log the first |n| floats starting at |yy| under label |name|.
 * @param n    element count
 * @param yy   first element of the array to dump
 * @param name label printed with the array
 * @return SUCCESS
 *
 * NOTE(review): the opening "{" in the header line is never matched by a
 * closing log line, and the static counter is not thread-safe — both kept
 * as-is since this is debug output only.
 */
Status print_array(const int32_t n, float &yy, std::string name) {
  static int count = 0;
  // %d matches the signed int / int32_t arguments; the original %u was a
  // printf conversion-specifier mismatch.
  FE_LOGD("%d. %s = {", count++, name.c_str());
  float *y = &yy;
  for (int32_t i = 0; i < n; ++i) {
    FE_LOGD("y[%d] = %f", i, y[i]);
  }
  return SUCCESS;
}

/**
 * @brief Debug-log the first |n| floats of array |yy| under label |name|.
 * @param n    element count
 * @param yy   pointer to the array to dump (must not be null)
 * @param name label printed with the array
 * @return SUCCESS
 *
 * NOTE(review): the opening "{" in the header line is never matched by a
 * closing log line, and the static counter is not thread-safe — both kept
 * as-is since this is debug output only.
 */
Status print_array(const int32_t n, const float *yy, std::string name) {
  // yy is a raw caller-supplied pointer; guard it like the other helpers do.
  FE_CHECK_NOTNULL(yy);
  static int count = 0;
  // %d matches the signed int / int32_t arguments; the original %u was a
  // printf conversion-specifier mismatch.
  FE_LOGD("%d. %s = {", count++, name.c_str());
  for (int32_t i = 0; i < n; ++i) {
    FE_LOGD("yy[%d] = %f", i, yy[i]);
  }
  return SUCCESS;
}
}  // namespace fe
