#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable
#define ACCUM_FLT4 float4
#define FLT float
#define FLT2 float2
#define FLT3 float3
#define FLT4 float4
#define TO_FLT4 convert_float4
#define TO_ACCUM_TYPE convert_float4
#define TO_ACCUM_FLT convert_float
#define READ_IMAGE read_imagef
#define WRITE_IMAGE write_imagef
__constant sampler_t smp_edge = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST;
__constant sampler_t smp_none = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_NONE | CLK_FILTER_NEAREST;
__constant sampler_t smp_zero = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
#define FLT8 float8
#define FLT16 float16
__kernel void conv_1x1_tflite(
__global float4* biases_buffer,
  __global float4* dst_tensor_buffer,
  __global float4* src_tensor_buffer,
  __global float16* weights_buffer,
  int4 shared_int4_0,
  int4 shared_int4_1) {
    
  int X = get_global_id(0) * 2;
  int X_SRC = get_global_id(0) * 2;
  int Y = get_global_id(1) * 1;
  int Z = get_global_id(2) * 2;
  if (X >= shared_int4_0.x || Y >= shared_int4_0.y || Z >= shared_int4_0.z) return;
  __global FLT16* weights_cache = weights_buffer + (Z * shared_int4_0.w + Y * 2) * shared_int4_1.x;
  ACCUM_FLT4 bias_val_0 = TO_ACCUM_TYPE(biases_buffer[Z + 0]);
  ACCUM_FLT4 r000 = bias_val_0;
  ACCUM_FLT4 r001 = bias_val_0;
  ACCUM_FLT4 bias_val_1 = TO_ACCUM_TYPE(biases_buffer[Z + 1]);
  ACCUM_FLT4 r100 = bias_val_1;
  ACCUM_FLT4 r101 = bias_val_1;
  int xc0 = min(X_SRC + 0, shared_int4_1.y - 1);
  int xc1 = min(X_SRC + 1, shared_int4_1.y - 1);
  int yc0 = min(Y + 0, shared_int4_0.w - 1);
  int src_addr_0 = (yc0) * shared_int4_1.y + (xc0);
  int src_addr_1 = (yc0) * shared_int4_1.y + (xc1);
  for (int s = 0; s < shared_int4_1.x; ++s) {
    FLT4 s0 = src_tensor_buffer[src_addr_0];
    FLT4 s1 = src_tensor_buffer[src_addr_1];
    FLT16 W0 = weights_cache[0];
    r000 += W0.s0123 * s0.s0;
    r000 += W0.s4567 * s0.s1;
    r000 += W0.s89ab * s0.s2;
    r000 += W0.scdef * s0.s3;
    r001 += W0.s0123 * s1.s0;
    r001 += W0.s4567 * s1.s1;
    r001 += W0.s89ab * s1.s2;
    r001 += W0.scdef * s1.s3;
    FLT16 W1 = weights_cache[1];
    r100 += W1.s0123 * s0.s0;
    r100 += W1.s4567 * s0.s1;
    r100 += W1.s89ab * s0.s2;
    r100 += W1.scdef * s0.s3;
    r101 += W1.s0123 * s1.s0;
    r101 += W1.s4567 * s1.s1;
    r101 += W1.s89ab * s1.s2;
    r101 += W1.scdef * s1.s3;
    src_addr_0 += shared_int4_1.y * shared_int4_0.w;
    src_addr_1 += shared_int4_1.y * shared_int4_0.w;
    weights_cache += 2;
  }
  if (X + 0 < shared_int4_0.x && Y + 0 < shared_int4_0.y) {
    FLT4 res = TO_FLT4(r000);
    dst_tensor_buffer[(((Z + 0) * shared_int4_0.y + (Y + 0)) * shared_int4_0.x + (X + 0))] = res;
;
  }
  if (X + 1 < shared_int4_0.x && Y + 0 < shared_int4_0.y) {
    FLT4 res = TO_FLT4(r001);
    dst_tensor_buffer[(((Z + 0) * shared_int4_0.y + (Y + 0)) * shared_int4_0.x + (X + 1))] = res;
;
  }
  if (Z + 1 >= shared_int4_0.z) return;
  if (X + 0 < shared_int4_0.x && Y + 0 < shared_int4_0.y) {
    FLT4 res = TO_FLT4(r100);
    dst_tensor_buffer[(((Z + 1) * shared_int4_0.y + (Y + 0)) * shared_int4_0.x + (X + 0))] = res;
;
  }
  if (X + 1 < shared_int4_0.x && Y + 0 < shared_int4_0.y) {
    FLT4 res = TO_FLT4(r101);
    dst_tensor_buffer[(((Z + 1) * shared_int4_0.y + (Y + 0)) * shared_int4_0.x + (X + 1))] = res;
;
  }
}