/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FUSION_ENGINE_OPTIMIZER_GRAPH_OPTIMIZER_GRAPH_FUSION_FUSION_PASS_MANAGER_BUILTIN_PASS_QUANT_PASS_BIAS_OPTIMIZE_QUANT_ROLLBACK_BIAS_OPTIMIZE_QUANT_ROLLBACK_BASE_H_
#define FUSION_ENGINE_OPTIMIZER_GRAPH_OPTIMIZER_GRAPH_FUSION_FUSION_PASS_MANAGER_BUILTIN_PASS_QUANT_PASS_BIAS_OPTIMIZE_QUANT_ROLLBACK_BIAS_OPTIMIZE_QUANT_ROLLBACK_BASE_H_

#include <map>
#include <string>
#include <vector>
#include "common/configuration.h"
#include "common/fe_log.h"
#include "common/math_util.h"
#include "common/op_info_common.h"
#include "common/unknown_shape_util.h"
#include "external/graph/types.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/node_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "graph/utils/tensor_utils.h"
#include "graph_optimizer/fusion_common/pattern_fusion_base_pass.h"
#include "graph_optimizer/graph_fusion/fusion_pass_manager/builtin_pass/quant_pass/quant_host_cpu_op_common.h"

namespace fe {
// Node-pattern labels used when matching the quant/dequant fusion sub-graph.
// NOTE: std::string is qualified explicitly so this header does not depend on
// a using-declaration leaking in from another include.
static const std::string PATTERN_QUANT = "ascendquant";
static const std::string PATTERN_CUBE = "cube";
static const std::string PATTERN_DEQUANT = "ascenddequant";
static const std::string PATTERN_PAD = "pad";
static const std::string PATTERN_WEIGHT = "weight";
static const std::string PATTERN_BIAS = "bias";
static const std::string PATTERN_SCALE = "deqScale";
static const std::string PATTERN_OUTPUT = "output";

// Operator type names referenced by this pass.
static const std::string QUANT = "AscendQuant";
static const std::string DEQUANT = "AscendDequant";
static const std::string PAD = "Pad";
static const std::string ENTER = "Enter";
static const std::string CONST = "Const";

// Minimum output-channel count for bias optimization to apply.
static const int32_t MIN_BIAS_OPTIMIZE_CHANNEL = 16;
// Input count of a cube op that carries a bias (x, weight, bias).
static const size_t INPUT_SIZE_CONTAINS_BIAS = 3;
// Input index of the deq_scale tensor on the AscendDequant node.
static const uint32_t DEQUANT_SCALE_INDEX = 1;

// Tensor names of the inserted host CPU op, indexed by HostOpMode:
// weight rollback uses the filter tensor, bias rollback uses the bias tensor,
// and bias optimization uses both (bias first).
const std::vector<std::vector<std::string>> TENSOR_NAME_OF_HOST_CPU_OP = {
    {CUBE_FILTER}, {CUBE_BIAS}, {CUBE_OPTIMIZATION_BIAS, CUBE_OPTIMIZATION_FILTER}};

/* Get the bias or filter weight index by mode (indexed by HostOpMode).
 * The first index is the main tensor, and the output tensor will be
 * the same as this tensor.
 * After a node is inserted between conv and filter, the weight size of conv
 * decreases to 1, so the conv weight index for the cube bias is 0.
 */
const std::vector<std::vector<uint32_t>> ORIGINAL_CONV_WEIGHT_INDEX = {{0}, {0}, {1, 0}};

/* Anchor (data-input) index on the cube node, indexed by HostOpMode.
 * The weight count of conv decreases once a host op is inserted between
 * weight and conv, but the anchor index does not change. */
const std::vector<std::vector<uint32_t>> ORIGINAL_CONV_ANCHOR_INDEX = {{1}, {2}, {2, 1}};

enum HostOpMode { WEIGHT_ROLL_BACK_MODE = 0, BIAS_ROLL_BACK_MODE = 1, BIAS_OPTIMIZATION_MODE = 2, MODE_BOTTOM = 3 };

// Nodes matched from the quant fusion pattern, passed as a unit to
// CreateNewHostCpuOp.
struct PatternNodes {
  ge::NodePtr cube_node;     // cube op (e.g. convolution) between quant and dequant
  ge::NodePtr dequant_node;  // AscendDequant node
  ge::NodePtr quant_node;    // AscendQuant node
};

enum QuantProcessMode { BIAS_OPTIMIZE = 0, QUANT_ROLLBACK, QUANT_UNDIFINED };

class BiasOptimizeQuantRollbackBase : public PatternFusionBasePass {
 protected:
  /*
   * Get the output-channel (co) dim value of the cube node.
   */
  virtual Status GetCoValue(ge::NodePtr &cube_node, int64_t &co);

  /*
   * Some ops, e.g. deconvolution, depthwise conv2d and transposed convolution,
   * need the attr cin_cout_reverse set, because their bias channel equals the
   * weight's C channel dim value.
   */
  virtual void SetCinCoutReverse(ge::NodePtr &node_ptr);

  /*
   * Set the cube node's bias name (different cube ops use different names).
   */
  Status SetBiasName(std::string &bias_name);

  /*
   * Judge the quant process mode:
   * bias optimization or quant rollback.
   */
  virtual Status GetQuantProcessMode(ge::NodePtr &quant_node, ge::NodePtr &cube_node,
                                     QuantProcessMode &quant_process_mode);

  /*
   * Judge the deq_scale shape info.
   * Only 1-D shapes are supported.
   * If soc_version is v100, offset_w must be zero.
   */
  Status JudgeDeqscaleShape(ge::NodePtr &dequant_node);

  /*
   * Remove an Enter node from the graph.
   */
  Status RemoveEnter(ge::ComputeGraph &graph, ge::NodePtr node);

  /*
   * Copy the offset attr of the quant node onto the cube node.
   */
  Status SetQuantParameters(ge::NodePtr &cube_node, ge::NodePtr &quant_node);

  /*
   * Create an all-zero bias input (of length co) for the cube node,
   * because bias optimization will insert a host op that rewrites it.
   */
  Status CreateBiasInput(ge::ComputeGraph &graph, ge::NodePtr &cube_node, const int64_t &co,
                         std::vector<ge::NodePtr> &fusion_nodes);

  // Judge whether the cube node already has a bias input.
  bool IsCubeHasBiasInput(ge::NodePtr cube_node);

  /*
   * Judge whether the cube node needs a bias input.
   * Sometimes a cube node has no bias input and does not need one.
   */
  virtual bool IsCubeNeedBiasInput(ge::NodePtr cube_node);

  /*
   * Do bias optimization (entry function).
   */
  virtual Status BiasOptimize(ge::ComputeGraph &graph, ge::NodePtr &cube_node, ge::NodePtr &dequant_node,
                              ge::NodePtr &quant_node, std::vector<ge::NodePtr> &fusion_nodes);

  /*
   * After quant rollback, the cube node's dtype must be refreshed.
   */
  Status SetCubeNodeDataType(ge::NodePtr &cube_node, ge::DataType &data_type, ge::DataType &target_data_type);

  virtual Status SetDataTypeOfNodes(ge::NodePtr &cube_node);

  /* This function removes the edges between the node and its predecessors,
   * so the param peer_out_anchors_of_node records the predecessors' output
   * data anchors before removal. Once the edges are removed, the peer out
   * anchors can no longer be retrieved.
   */
  Status RemoveInputEdgeAndSingleConstInput(ge::ComputeGraph &graph, ge::NodePtr &node,
                                            std::vector<ge::OutDataAnchorPtr> &peer_out_anchors_of_node);

  Status LinkOutputEdgeWithoutControl(ge::NodePtr old_node, ge::NodePtr new_node, std::string &cube_name,
                                      const std::vector<ge::OutDataAnchorPtr> &peer_out_anchors_of_old_node);

  /*
   * Create a host op of the given type.
   * Host op types: QuantWeightRollBack, QuantBiasRollBack, QuantBiasOptimization.
   */
  Status CreateNewHostCpuOp(const std::string &op_type, struct PatternNodes &pattern_node, ge::ComputeGraph &graph,
                            uint32_t mode, /* HostOpMode */
                            std::vector<ge::NodePtr> &fus_nodes);

  // Quant rollback:
  // create the rollback host op(s).
  Status DoFusion(ge::ComputeGraph &graph, ge::NodePtr &cube_node, ge::NodePtr &quant_node, ge::NodePtr &dequant_node,
                  std::vector<ge::NodePtr> &new_nodes);

  /*
   * When changing the quant node's edges: if the cube node is conv2d, a pad
   * node may precede it, so we must find the node right after quant and then
   * remove the edge between quant and that node.
   */
  virtual ge::NodePtr GetCubeNodeInputNode(ge::NodePtr &cube_node);

  // Quant rollback: remove the edges of the quant node.
  // Before removing the quant node, check whether it has other output nodes —
  // the quant node is only removed when it has exactly one output node.
  Status ChangeQuantNodeEdge(ge::ComputeGraph &graph, ge::NodePtr &cube_node, ge::NodePtr &quant_node);

  // Quant rollback: remove the edges of the dequant node,
  // then remove the dequant node itself.
  virtual Status ChangeDequantNodeEdge(ge::ComputeGraph &graph, ge::NodePtr &cube_node, ge::NodePtr &dequant_node);

  /*
   * Do quant rollback (entry function).
   */
  virtual Status QuantRollback(ge::ComputeGraph &graph, ge::NodePtr &cube_node, ge::NodePtr &dequant_node,
                               ge::NodePtr &quant_node, std::vector<ge::NodePtr> &fusion_nodes);

 protected:
  Status Fusion(ge::ComputeGraph &graph, Mapping &mapping, std::vector<ge::NodePtr> &fusion_nodes) override;

 private:
  // Different cube nodes use different bias names.
  std::string bias_name_ = "bias";
};

}  // namespace fe

#endif  // FUSION_ENGINE_OPTIMIZER_GRAPH_OPTIMIZER_GRAPH_FUSION_FUSION_PASS_MANAGER_BUILTIN_PASS_QUANT_PASS_BIAS_OPTIMIZE_QUANT_ROLLBACK_BIAS_OPTIMIZE_QUANT_ROLLBACK_BASE_H_
