/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_NNACL_FP32_CUMSUM_AVX512_H_
#define MINDSPORE_NNACL_FP32_CUMSUM_AVX512_H_

#include "nnacl/intrinsics/ms_simd_instructions.h"
#include "nnacl/intrinsics/ms_simd_avx512_instructions.h"

#ifdef __cplusplus
extern "C" {
#endif
#pragma GCC push_options
#pragma GCC target("avx512f")
#define MS_SIMD_INSTRUCTION MS_SIMD_AVX512_INSTRUCTION
#define BLOCK_NUM 16
#define MS_SIMD_AVX512

// (a, b, c) -> (a, a+b, a+b+c)  exclusive == false
// (a, b, c) -> (0, a,   a+b)    exclusive == true
// Seeds the output row with a straight copy of the input row (first layer of
// a non-exclusive cumsum), processing full 16-float SIMD blocks only.
// Returns the first element index NOT covered by a full block; the scalar
// tail is handled by the caller.
static inline int64_t CumsumOutputInitWithInputAVX512(int64_t index, const float *layer_input,
  float *layer_output, int inner_dim) {
  const int64_t vector_end = inner_dim - BLOCK_NUM + 1;
  while (index < vector_end) {
    SIMD_ST_F32(layer_output + index, SIMD_LD_F32(layer_input + index));
    index += BLOCK_NUM;
  }
  return index;
}

// Seeds the output row with zeros (first layer of an exclusive cumsum),
// processing full 16-float SIMD blocks only. Returns the first element index
// NOT covered by a full block; the scalar tail is handled by the caller.
static inline int64_t CumsumOutputInitWithZeroAVX512(int64_t index, float *layer_output, int inner_dim) {
  const int64_t vector_end = inner_dim - BLOCK_NUM + 1;
  const SIMD_F32 zeros = SIMD_MOV_F32(0.0f);
  while (index < vector_end) {
    SIMD_ST_F32(layer_output + index, zeros);
    index += BLOCK_NUM;
  }
  return index;
}

// One accumulation step of the cumsum recurrence:
//   layer_output[i] = layer_input[i] + layer_last_output[i]
// over full 16-float SIMD blocks. Returns the first element index NOT
// covered by a full block; the scalar tail is handled by the caller.
static inline int64_t CumsumAVX512(int64_t index, const float *layer_input, float *layer_output, float *layer_last_output,
  int inner_dim) {
  const int64_t vector_end = inner_dim - BLOCK_NUM + 1;
  while (index < vector_end) {
    SIMD_ST_F32(layer_output + index,
                SIMD_ADD_F32(SIMD_LD_F32(layer_input + index), SIMD_LD_F32(layer_last_output + index)));
    index += BLOCK_NUM;
  }
  return index;
}

// (a, b, c) -> (c+b+a, c+b, c) exclusive==false
// (a, b, c) -> (c+b, c, 0) exclusive==true
// Reverse-direction accumulation step: the pointers are positioned at the
// END of the row and the 16-float window walks backwards, so each block is
// loaded/stored at (base - index - BLOCK_NUM + 1). Returns the first element
// index NOT covered by a full block; the scalar tail is handled by the caller.
static inline int64_t CumsumReverseAVX512(int64_t index, const float *layer_input, float *layer_output,
  float *layer_last_output, int inner_dim) {
  const int64_t vector_end = inner_dim - BLOCK_NUM + 1;
  while (index < vector_end) {
    const int64_t back_off = index + BLOCK_NUM - 1;  // distance from base to the window start
    SIMD_ST_F32(layer_output - back_off,
                SIMD_ADD_F32(SIMD_LD_F32(layer_input - back_off), SIMD_LD_F32(layer_last_output - back_off)));
    index += BLOCK_NUM;
  }
  return index;
}

// (a, b, c) -> (a, a+b, a+b+c)  exclusive == false
// (a, b, c) -> (0, a,   a+b)    exclusive == true
// int32 variant: seeds the output row with a straight copy of the input row
// (first layer of a non-exclusive cumsum), processing full 16-lane SIMD
// blocks only. Returns the first element index NOT covered by a full block;
// the scalar tail is handled by the caller.
static inline int64_t CumsumIntOutputInitWithInputAVX512(int64_t index, const int32_t *layer_input,
  int32_t *layer_output, int inner_dim) {
  const int64_t vector_end = inner_dim - BLOCK_NUM + 1;
  while (index < vector_end) {
    SIMD_ST_EPI32(layer_output + index, SIMD_LD_EPI32(layer_input + index));
    index += BLOCK_NUM;
  }
  return index;
}

// int32 variant: seeds the output row with zeros (first layer of an
// exclusive cumsum), processing full 16-lane SIMD blocks only. Returns the
// first element index NOT covered by a full block; the scalar tail is
// handled by the caller.
static inline int64_t CumsumIntOutputInitWithZeroAVX512(int64_t index, int32_t *layer_output, int inner_dim) {
  for (int block_max_size = inner_dim - BLOCK_NUM + 1; index < block_max_size; index += BLOCK_NUM) {
    // Splat an integer zero. The original passed the float literal 0.0f
    // (copy-paste from the float variant), relying on implicit float->int
    // conversion inside SIMD_MOV_EPI32; an int literal matches the macro's
    // int32 element type.
    SIMD_ST_EPI32(layer_output + index, SIMD_MOV_EPI32(0));
  }
  return index;
}

// int32 variant of the accumulation step:
//   layer_output[i] = layer_input[i] + layer_last_output[i]
// over full 16-lane SIMD blocks. Returns the first element index NOT
// covered by a full block; the scalar tail is handled by the caller.
static inline int64_t CumsumIntAVX512(int64_t index, const int32_t *layer_input, int32_t *layer_output, int32_t *layer_last_output,
  int inner_dim) {
  const int64_t vector_end = inner_dim - BLOCK_NUM + 1;
  while (index < vector_end) {
    SIMD_ST_EPI32(layer_output + index,
                  SIMD_ADD_EPI32(SIMD_LD_EPI32(layer_input + index), SIMD_LD_EPI32(layer_last_output + index)));
    index += BLOCK_NUM;
  }
  return index;
}

// (a, b, c) -> (c+b+a, c+b, c) exclusive==false
// (a, b, c) -> (c+b, c, 0) exclusive==true
// int32 reverse-direction accumulation step: the pointers are positioned at
// the END of the row and the 16-lane window walks backwards, so each block
// is loaded/stored at (base - index - BLOCK_NUM + 1). Returns the first
// element index NOT covered by a full block; the scalar tail is handled by
// the caller.
static inline int64_t CumsumReverseIntAVX512(int64_t index, const int32_t *layer_input, int32_t *layer_output, int32_t *layer_last_output,
  int inner_dim) {
  const int64_t vector_end = inner_dim - BLOCK_NUM + 1;
  while (index < vector_end) {
    const int64_t back_off = index + BLOCK_NUM - 1;  // distance from base to the window start
    SIMD_ST_EPI32(layer_output - back_off,
                  SIMD_ADD_EPI32(SIMD_LD_EPI32(layer_input - back_off), SIMD_LD_EPI32(layer_last_output - back_off)));
    index += BLOCK_NUM;
  }
  return index;
}

#undef MS_SIMD_INSTRUCTION
#undef BLOCK_NUM
#pragma GCC pop_options
#undef MS_SIMD_AVX512
#ifdef __cplusplus
}
#endif
#endif
