#pragma once

#include "caffe2/operators/fully_connected_op.h"
#include "caffe2/operators/quantized/server/dnnlowp_op.h"
#include <fbgemm/Fbgemm.h>

namespace caffe2 {

// DNNLowP (quantized, low-precision) implementation of FullyConnected.
// T is the quantized activation type (e.g. std::uint8_t). Weights are kept
// signed (see T_signed below) and, on the uint8_t fast path, pre-packed for
// fbgemm GEMM kernels.
template <typename T>
class FullyConnectedDNNLowPOp
  : public DNNLowPOp<T, FullyConnectedOp<CPUContext>> {
 public:
  FullyConnectedDNNLowPOp(const OperatorDef& operator_def, Workspace *ws);
  bool RunOnDevice() override;

  USE_OPERATOR_FUNCTIONS(CPUContext);
  USE_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, FullyConnectedOp<CPUContext>);

 protected:
  // Chooses input/weight/output quantization and requantization parameters.
  // Returns false on failure.
  bool GetQuantizationParameters_();

  // Axes at which X and W are coerced to 2D, mirroring FullyConnectedOp.
  std::size_t axis_{1};
  std::size_t axis_w_{1};
  // Cached output shape to avoid recomputing it every run.
  // NOTE(review): was unqualified `vector` (relied on caffe2's
  // `using std::vector;`); qualified for consistency with the rest of
  // this header.
  std::vector<std::int64_t> Y_shape_cache_;

  // Parameters for requantizing the int32 accumulator back to T.
  dnnlowp::RequantizationParams requantization_params_;
  // True once requantization_params_ has been computed (so constant-weight
  // runs can skip re-selection).
  bool requantization_param_selected_{false};

  // x86 only provides SIMD instructions that multiply a signed integer with an
  // unsigned integer. We use signed for weights.
  using T_signed = typename std::make_signed<T>::type;

  // used in fast path for T == uint8_t
  std::unique_ptr<fbgemm2::PackBMatrix<std::int8_t>> Wq_packed_;
  std::vector<std::uint8_t> X_pack_buf_;

  // int32 accumulator for the GEMM output before requantization.
  std::vector<std::int32_t> Y_int32_;

  // used in slow path for T != uint8_t
  std::vector<T_signed> W_quantized_;

  // pre-computed biases and offsets
  std::vector<std::int32_t> b_quantized_;
  const std::int32_t* b_quantized_data_{nullptr};
  std::vector<std::int32_t> row_offsets_, column_offsets_;

  // Dequantized bias populated when input bias is quantized and
  // dequantized_output_ == true
  std::vector<float> b_dequantized_;
  const float* b_dequantized_data_{nullptr};

  // When true, weight packing / quantization can be done once and cached.
  bool is_weight_constant_{true};

  // Input scale seen on the previous run; used to detect when
  // input-dependent precomputation must be redone.
  float in_qparams0_scale_old_ = 0;
}; // class FullyConnectedDNNLowPOp

} // namespace caffe2
