#pragma once

#include <cstddef> // std::size_t
#include <cstdint> // fixed-width integer types (uint8_t, int32_t, uint64_t, ...)

namespace pytxdnn {

// Maximum rank of a tensor shape used throughout this header:
// n,h,w,c plus two extra slots (the "x,x" in the original annotation).
// Qualified as std::size_t: only std::size_t is guaranteed by <cstddef>.
constexpr std::size_t MAX_SHAPE_DIM = 6; // n,h,w,c,x,x

// kcore/include/instr_def.h

// Element data-format identifiers for tensors.
// Mirrors the definition in kcore/include/instr_def.h (see note above);
// enumerator order fixes the numeric values — do not reorder or insert.
enum Data_Format {
  Fmt_INT8,
  Fmt_INT16,
  Fmt_FP16,
  Fmt_BF16,
  Fmt_INT32,
  Fmt_FP32,
  Fmt_TF32,
  Fmt_BOOL,
  Fmt_UINT8,
  Fmt_UINT16,
  Fmt_UINT32,
  Fmt_UINT64,
  Fmt_UNUSED, // placeholder / "no format" sentinel
};

// oplib_tx81 include/common_base.h

// GEMM execution variants: plain matmul, partial-sum output, 4-dim batched
// form, and fused reshape/transpose combinations used around the matmul.
enum GEMM_MODE {
  GEMM_NORMAL = 0,
  GEMM_PSUM = 1,   // 1: gemm psum output
  GEMM_DIM4 = 2,   // 2: gemm in dim4
  GEMM_GTrans = 3, // 3: gemm + reshape + transpose
  GEMM_TransG = 4, // 4: transpose + reshape + gemm
  GEMM_KVGemm = 5, // 5: right: reshape, output: reshape
  GEMM_QKGemm = 6, // 6: left: reshape, output: transpose(2,3) + reshape
  GEMM_MODE_END    // count / end marker, not a valid mode
};

// Tensor / matrix memory-layout identifiers.
// Naming follows the usual axis letters (N=batch, C=channel, H/W=spatial,
// T=time for 3D ops, M/K=matmul dims); lower-case letters (Cx, Kx, HWc)
// appear to denote a blocked/packed inner dimension — TODO confirm.
// Explicit values are assigned; keep them numerically stable.
enum LAYOUT_MODE {
  LAYOUT_TENSOR = 0,
  LAYOUT_NHWC = 1,
  LAYOUT_NCHW = 2,
  LAYOUT_NCxHWc = 3,
  LAYOUT_NMK = 4,
  LAYOUT_NKxMk = 5,
  LAYOUT_NTHWC = 6,   // 3D Conv/Pool/Deconv
  LAYOUT_NCTHW = 7,   // only InputOp be set, then must add TransposeOp to NTHWC
  LAYOUT_NTCxHWc = 8, // 3D Conv/Pool/Deconv
  LAYOUT_HWOI = 9,
  LAYOUT_CxHWOI = 10,
  LAYOUT_MK = 11,
  LAYOUT_KxMK = 12,
  LAYOUT_Tuple = 13,
  LAYOUT_HWIO = 14,
  LAYOUT_OIHW = 15,
  LAYOUT_IOHW = 16,
  LAYOUT_KM = 17,
  LAYOUT_NKM = 18,
  LAYOUT_CNHW = 19,
  LAYOUT_HWCN = 20,
  LAYOUT_HWNC = 21,
  // NOTE(review): the 32+n group is deliberately offset from the block above
  // (looks like bit 5 marks a distinct layout family) — verify before reuse.
  LAYOUT_Cx = 32 + 1,
  LAYOUT_NTENSOR = 32 + 2,
  LAYOUT_NCx = 32 + 3,
  LAYOUT_MODE_END // end marker; NOT the number of layouts (values are sparse)
};

// DMA load variants selecting how a tensor is brought into local memory.
enum LOAD_MODE {
  LOAD_NORMAL = 0,     // normal
  LOAD_TENSORR = 1,    // 1: conv, gemm tensor for TP
  LOAD_CNORM2Cx = 2,   // 2: channel norm ( compact -> cx )
  LOAD_GEMM_BATCH = 3, // 3: KxMK  NKxMK (gemm weight), normally 2 dims
  LOAD_MODE_END        // count / end marker
};

// oplib_tx81 riscv.h

// Per-tensor entry of the boot-parameter blob (see D_BootParamHead below):
// one record per input / output / parameter tensor.
struct D_BootParamDynInfo {
  uint64_t device_addr;           // device-side address of the tensor data
  uint64_t size;                  // size in bytes — presumably; TODO confirm
  uint32_t dtype;                 // data type id (presumably a Data_Format value — confirm)
  uint32_t dim;                   // number of valid entries in shape[]
  uint32_t shape[MAX_SHAPE_DIM];  // extent per dimension
};

// Header of the boot-parameter blob. Per the MaxLen note, the blob is laid
// out as: D_BootParamHead followed by n D_BootParamDynInfo records,
// n = InputNum + OutputNum + ParamNum.
// Field order/sizes look ABI-sensitive (explicit reserved padding keeps the
// 64-bit fields 8-byte aligned) — do not reorder.
struct D_BootParamHead {
  uint32_t MaxLen; // BootParamHead + n * BootParamDynInfo, n = inputnum + outputnum + paramnum
  uint32_t LdmemLen;
  uint32_t InputNum;
  uint32_t OutputNum;
  uint32_t ParamNum;
  uint32_t reserved;  // padding: aligns CacheMemLen to 8 bytes
  uint64_t CacheMemLen;
  uint64_t CacheMemAddr;
  uint32_t DataLen;
  uint32_t reserved1; // padding: aligns DataAddr to 8 bytes
  uint64_t DataAddr;
};

// kcore/include/atomic_common.h

// Local shape descriptor: a slice of a whole tensor shape, tracking both the
// nominal slice length and the real (possibly clipped) length per dimension.
struct L_SHAPE {
  int32_t shape_whole[MAX_SHAPE_DIM]; // whole shape
  int32_t shape_start[MAX_SHAPE_DIM]; // start idx of shape slice
  int32_t shape_slice[MAX_SHAPE_DIM]; // length of shape slice
  int32_t shape_real[MAX_SHAPE_DIM];  // real length of the shape slice
  int32_t dim;                        // dimension of the shape
};

// Global shape / iteration state. Field semantics below are inferred from
// the names only — TODO(review) confirm against kcore/include/atomic_common.h.
struct G_SHAPE {
  int32_t spatial_start[MAX_SHAPE_DIM];  // presumably start of the spatial region per dim
  int32_t spatial_end[MAX_SHAPE_DIM];    // presumably end of the spatial region per dim
  int32_t dynamic_offset[MAX_SHAPE_DIM];
  int32_t shape[MAX_SHAPE_DIM];
  int32_t dim;                           // number of valid dimensions
  int32_t done;                          // looks like a completion flag — confirm
  int32_t batch_offset[MAX_SHAPE_DIM];
};

// Lightweight tensor handle: element format, device address, and a raw
// pointer to its shape. Ownership of `shape` is not established here —
// treat it as non-owning unless the owner is identified.
struct TSR {
  Data_Format format; // element data type
  uint64_t addr;      // device address of the tensor data
  L_SHAPE* shape;     // shape/slice descriptor (raw pointer, see note above)
};

// Sliding-window parameters for Conv/Pool/Deconv:
// K* = kernel size, S* = stride, *_pad_* = padding per side;
// the *t fields add the temporal axis for 3D ops.
struct WINDOW_PARAM {
  uint16_t Ky;
  uint16_t Kx;
  uint16_t Kt; // 3D CNN
  uint16_t Sy;
  uint16_t Sx;
  uint16_t St;       // 3D CNN
  uint16_t up_pad_y; // pad_t
  uint16_t dn_pad_y; // pad_b
  uint16_t up_pad_x; // pad_l
  uint16_t dn_pad_x; // pad_r
  uint16_t up_pad_t; // 3D CNN
  uint16_t dn_pad_t; // 3D CNN
};

// How a tensor is partitioned across the processor grid (data-parallel /
// tensor-parallel) and sliced over time, plus the layout and load mode
// used when moving it.
struct SPATIAL_PARAM {
  int32_t spatial_sharding[MAX_SHAPE_DIM]; // per-dim sharding factors — presumably; confirm
  int32_t temporal_slice[MAX_SHAPE_DIM];   // per-dim temporal slice sizes — presumably; confirm
  int32_t dp_dim_x;     // data-parallel grid extent, x
  int32_t dp_dim_y;     // data-parallel grid extent, y
  int32_t tp_dim_x;     // tensor-parallel grid extent, x
  int32_t tp_dim_y;     // tensor-parallel grid extent, y
  int32_t layout;       // LAYOUT_MODE
  int32_t first_dim;    // update dim, default is last dim; from lower to higher
  uint8_t parallel;     // 0: DP, 1: TP, 2: MIX
  uint8_t dp_inner;     // valid when MIX, 0: tp is inner, 1: dp is inner
  uint8_t if_hwt_split; // height, width, time are split or not
  LOAD_MODE load_type;  // DMA load variant (see LOAD_MODE)
  WINDOW_PARAM window;  // conv/pool window, if applicable
};

// Parameters for loading a variable from DDR into local memory.
struct LOAD_VAR_PARAM {
  uint64_t addr; // DDR start address
  int32_t tile_id_this;                  // id of the tile performing the load
  int32_t temporal_slice[MAX_SHAPE_DIM]; // per-dim temporal slice sizes
  int32_t dma_mode;
  int32_t base_offset;
  SPATIAL_PARAM spatial_param;           // sharding/layout description of the source
};

// Parameters for storing a variable from local memory back to DDR.
struct STORE_VAR_PARAM {
  uint64_t addr; // DDR start address
  int32_t tile_id_this; // id of the tile performing the store
  LAYOUT_MODE layout;   // destination layout
  int32_t dma_mode;
  int32_t tile_offset;
};

// GEMM operation configuration: transpose/fusion flags, post-op enables
// (relu/scale/bias), and quantization parameters.
struct GEMM_PARAM {
  int32_t layout;     // LAYOUT_MODE — presumably; confirm against callers
  uint8_t gemm_l_trs; // left matrix transpose
  uint8_t gemm_r_trs; // right matrix transpose
  uint8_t psum_en;    // partial-sum accumulation enable
  uint8_t lrelu_en; // relu or lrelu
  uint8_t relu_en;
  uint8_t scale_en;
  uint8_t bias_en;
  uint8_t quant_zp_cur;   // output zero point (0-255)
  uint8_t quant_reserved; // (0-255)
  uint8_t quant_zp_pre;   // output zero point (0-255)
  uint8_t quant_q1;       //
  uint8_t quant_q0;       //
  GEMM_MODE op_mode;      // GEMM_MODE: 0: normal, 1: psum output, 2: 4-dim gemm, 3: gemm+reshape+transpose, 4:
                          // transpose+reshape+gemm
};



} // namespace pytxdnn
