#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable
#define ACCUM_FLT4 float4
#define FLT float
#define FLT2 float2
#define FLT3 float3
#define FLT4 float4
#define TO_FLT4 convert_float4
#define TO_ACCUM_TYPE convert_float4
#define TO_ACCUM_FLT convert_float
#define READ_IMAGE read_imagef
#define WRITE_IMAGE write_imagef
__constant sampler_t smp_edge = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST;
__constant sampler_t smp_none = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_NONE | CLK_FILTER_NEAREST;
__constant sampler_t smp_zero = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
#define FLT8 float8         // float8 and float16 aliases (wide vector types for weight blocks)
#define FLT16 float16
// 1x1 convolution kernel: each work-item computes a 2x1x2 output tile —
// two consecutive X positions for two consecutive output slices (OC4 groups).
// Buffers are laid out as slice-major NHWC4: index = (slice * height + y) * width + x.
__kernel void conv_1x1_read(
__global float4* biases_buffer,
  __global float4* dst_tensor_buffer,           // dst tensor: dst_slices x dst_height x dst_width float4s (64 x 36 x 14400 per author's notes)
  __global float4* src_tensor_buffer,           // src tensor: src_slices x src_height x src_width float4s (32 x 36 x 14400 per author's notes)
  __global float16* weights_buffer,             // weights: one float16 = one IC4OC4 block; laid out per (oc4-pair group, y, ic4) — TODO confirm against host packing code
  int4 shared_int4_0,                           // (dst_width, dst_height, dst_slices, src_height) = (14400, 36, 64, 36)
  int4 shared_int4_1) {                         // (src_slices, src_width, 0, 0) = (32, 14400, 0, 0)
  int X = get_global_id(0) * 2;                 // *2 because each work-item handles two consecutive x positions; x = range(0, 7200) * 2
  int X_SRC = get_global_id(0) * 2;             // X_SRC == X (same coordinate used for reading the source)
  int Y = get_global_id(1) * 1;                 // y = range(0, 36)
  int Z = get_global_id(2) * 2;                 // *2 because filters are split into groups of two OC4 slices; z = range(0, 32) * 2

  // if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || Z >= args.dst_tensor.Slices()) return;
  // i.e. if (x >= 14400 || y >= 36 || z >= 64) return;
  if (X >= shared_int4_0.x || Y >= shared_int4_0.y || Z >= shared_int4_0.z) return;

  // (Z*36 + Y*2) * 32 is the base address of two consecutive IC4OC4 blocks (each float16 = one IC4OC4).
  // NOTE(review, from original author): arguably clearer as ((Z/2)*36 + Y) * 32 * 2, because in this
  // weight layout the filters are split into groups where each group covers two oc4 slices, while Z is
  // an oc4 index — Z/2 would name the group. Each (group, y) position is a 32*2*4*4 vector consumed by
  // the loop below. Since Z is always even here, both formulas address the same element.
  __global FLT16* weights_cache = weights_buffer + (Z * shared_int4_0.w + Y * 2) * shared_int4_1.x;
  // This kernel produces two OC4 outputs, so fetch biases for 8 output channels: two float4s.
  ACCUM_FLT4 bias_val_0 = TO_ACCUM_TYPE(biases_buffer[Z + 0]);
  ACCUM_FLT4 r000 = bias_val_0;                     // oc4 group 0, first x position
  ACCUM_FLT4 r001 = bias_val_0;                     // oc4 group 0, second x position
  ACCUM_FLT4 bias_val_1 = TO_ACCUM_TYPE(biases_buffer[Z + 1]);
  ACCUM_FLT4 r100 = bias_val_1;                     // oc4 group 1, first x position
  ACCUM_FLT4 r101 = bias_val_1;                     // oc4 group 1, second x position
  int xc0 = min(X_SRC + 0, shared_int4_1.y - 1);        // clamp to source width: min(x+0, src_width-1)
  int xc1 = min(X_SRC + 1, shared_int4_1.y - 1);        // clamp: min(x+1, src_width-1)
  int yc0 = min(Y + 0, shared_int4_0.w - 1);            // clamp to source height: min(y, src_height-1)
  int src_addr_0 = (yc0) * shared_int4_1.y + (xc0);     // src_addr_0 = yc0 * src_width + xc0: offset of (y, x) within source slice 0
  int src_addr_1 = (yc0) * shared_int4_1.y + (xc1);     // src_addr_1 = yc0 * src_width + xc1: offset of (y, x+1) within source slice 0
  for (int s = 0; s < shared_int4_1.x; ++s) {           // for each input slice s in range(0, src_slices)
    FLT4 s0 = src_tensor_buffer[src_addr_0];            // read two adjacent float4s from slice s: (y, x) and (y, x+1)
    FLT4 s1 = src_tensor_buffer[src_addr_1];
    FLT16 W0 = weights_cache[0];                        // one float16 = one IC4OC4 weight block
    r000 += W0.s0123 * s0.s0;                           // W0.s0123: the 4 output filters' weight for input channel s*4+0; s0.s0 is that input channel's value — yields an oc4 partial sum
    r000 += W0.s4567 * s0.s1;                           // W0.s4567: weights for input channel s*4+1 — accumulating along the ic dimension
    r000 += W0.s89ab * s0.s2;
    r000 += W0.scdef * s0.s3;                           // through channel s*4+3: one IC4 fully accumulated
    r001 += W0.s0123 * s1.s0;                           // same weights applied to the second x position
    r001 += W0.s4567 * s1.s1;
    r001 += W0.s89ab * s1.s2;
    r001 += W0.scdef * s1.s3;
    FLT16 W1 = weights_cache[1];                        // second float16: the other oc4 group for the same input slice
    r100 += W1.s0123 * s0.s0;
    r100 += W1.s4567 * s0.s1;
    r100 += W1.s89ab * s0.s2;
    r100 += W1.scdef * s0.s3;
    r101 += W1.s0123 * s1.s0;
    r101 += W1.s4567 * s1.s1;
    r101 += W1.s89ab * s1.s2;
    r101 += W1.scdef * s1.s3;
    src_addr_0 += shared_int4_1.y * shared_int4_0.w;    // advance by src_width * src_height: jump to the same (y, x) in the next input slice
    src_addr_1 += shared_int4_1.y * shared_int4_0.w;   
    weights_cache += 2;                                 // advance to the next input slice's pair of weight blocks
  }

// Store the results: dst index = (slice * dst_height + y) * dst_width + x, with bounds checks per element.
  if (X + 0 < shared_int4_0.x && Y + 0 < shared_int4_0.y) {
    FLT4 res = TO_FLT4(r000);
    dst_tensor_buffer[(((Z + 0) * shared_int4_0.y + (Y + 0)) * shared_int4_0.x + (X + 0))] = res;
;
  }
  if (X + 1 < shared_int4_0.x && Y + 0 < shared_int4_0.y) {
    FLT4 res = TO_FLT4(r001);
    dst_tensor_buffer[(((Z + 0) * shared_int4_0.y + (Y + 0)) * shared_int4_0.x + (X + 1))] = res;
;
  }
  // Second oc4 group may fall outside the slice count when dst_slices is odd.
  if (Z + 1 >= shared_int4_0.z) return;
  if (X + 0 < shared_int4_0.x && Y + 0 < shared_int4_0.y) {
    FLT4 res = TO_FLT4(r100);
    dst_tensor_buffer[(((Z + 1) * shared_int4_0.y + (Y + 0)) * shared_int4_0.x + (X + 0))] = res;
;
  }
  if (X + 1 < shared_int4_0.x && Y + 0 < shared_int4_0.y) {
    FLT4 res = TO_FLT4(r101);
    dst_tensor_buffer[(((Z + 1) * shared_int4_0.y + (Y + 0)) * shared_int4_0.x + (X + 1))] = res;
;
  }
}