from te import tik
from topi.cce import util


class DistanceCompute():
    """TIK kernel builder computing L2 distances between query vectors and
    coarse centroids on an Ascend "mini" (v100) AI core.

    Uses the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2*x.y:
    ||x||^2 is computed on-chip from the queries, ||y||^2 arrives
    precomputed in ``input_precomputed``, and the inner products x.y are
    produced by the cube (matmul) unit via ``mmad``.
    """

    def __init__(self, input_queries, input_coarse_centroids,
                 input_precomputed, output_distances,
                 kernel_name="distance_compute"):
        """Record I/O shapes/dtypes, derive the tiling parameters and
        declare the global-memory tensors.

        Parameters
        ----------
        input_queries : dict
            "shape" is (queries_num, dim), "dtype" expected "float16".
        input_coarse_centroids : dict
            Coarse-centroid matrix description (fp16).
        input_precomputed : dict
            "shape" is (coarse_centroids_num,) -- precomputed ||y||^2.
        output_distances : dict
            Output distance matrix description (fp16).
        kernel_name : str
            Name used when building the CCE kernel.
        """
        self.shape_queries = input_queries.get("shape")
        self.dtype_queries = input_queries.get("dtype")
        self.shape_coarse_centroids = input_coarse_centroids.get("shape")
        self.dtype_coarse_centroids = input_coarse_centroids.get("dtype")
        self.shape_precomputed = input_precomputed.get("shape")
        self.dtype_precomputed = input_precomputed.get("dtype")
        self.shape_distances = output_distances.get("shape")
        self.dtype_distances = output_distances.get("dtype")
        self.kernel_name = kernel_name

        # derived compute parameters
        self.queries_num, self.dim = self.shape_queries
        # centroid count is taken from the 1-D precomputed-norms tensor
        self.coarse_centroids_num, = self.shape_precomputed

        # check parameter
        self.check_parameter()

        # set max vector mask per repeat: 64 lanes for fp32, 128 for fp16
        self.fp32_vector_mask_max = 64
        self.fp16_vector_mask_max = 128

        # split the centroid rows across 2 AI cores; each core's share is
        # rounded down to a multiple of 16, the last core takes the rest
        self.aicore_use = 2
        self.coarse_centroids_num_each_core = self.coarse_centroids_num // \
            self.aicore_use // 16 * 16
        self.coarse_centroids_num_last_core = self.coarse_centroids_num - \
            (self.aicore_use - 1) * self.coarse_centroids_num_each_core

        # L0B is 64 KB: cap queries per loop so the fp16 query tile fits;
        # for small dim the limit comes from queries_num_compute's
        # empirical buffer-capacity formula instead
        if self.dim >= 352:
            self.queries_num_each_loop = ((32768 // self.dim) // 16) * 16
        else:
            if queries_num_compute(self.dim) < 16:
                self.queries_num_each_loop = queries_num_compute(self.dim)
            else:
                self.queries_num_each_loop = (queries_num_compute(self.dim) //
                                              16) * 16

        # ping-pong (double) buffer: 32 KB per centroid tile
        self.coarse_centroids_num_each_loop = ((16384 // self.dim) // 16) * 16

        # The target machine is defined by the Dprofile function,
        # and the TIK DSL container is constructed by the Tik function.
        self.tik_instance = tik.Tik(tik.Dprofile("v100", "mini"))

        # scalar -2 for the -2*x.y term of the L2 expansion
        self.coeff = self.tik_instance.Scalar('float32')
        self.coeff.set_as(-2)

        self.zero = self.tik_instance.Scalar('float32')
        self.zero.set_as(0)

        # create input tensor: input_queries_gm, input_coarse_centroids_gm
        # and input_precomputed_gm
        # and output tensor: output_distances_gm in global buffer
        self.input_queries_gm = self.tik_instance.Tensor(
            self.dtype_queries, self.shape_queries,
            name="input_queries_gm", scope=tik.scope_gm)
        self.input_coarse_centroids_gm = self.tik_instance.Tensor(
            self.dtype_coarse_centroids, self.shape_coarse_centroids,
            name="input_coarse_centroids_gm", scope=tik.scope_gm)
        self.input_precomputed_gm = self.tik_instance.Tensor(
            self.dtype_precomputed, self.shape_precomputed,
            name="input_precomputed_gm", scope=tik.scope_gm)
        self.output_distances_gm = self.tik_instance.Tensor(
            self.dtype_distances, self.shape_distances,
            name="output_distances_gm", scope=tik.scope_gm)

    def check_parameter(self):
        """Validate shapes, dtypes, kernel name and 16-alignment.

        Raises RuntimeError (directly or via ``util`` checks) when any
        input is malformed; all tensors must be float16.
        """
        # check shape and dtype of input
        check_list = ["float16"]
        util.check_shape_rule(self.shape_queries)
        util.check_shape_rule(self.shape_coarse_centroids)
        util.check_shape_rule(self.shape_precomputed)
        util.check_shape_rule(self.shape_distances)

        util.check_tensor_shape_size(self.shape_queries)
        util.check_tensor_shape_size(self.shape_coarse_centroids)
        util.check_tensor_shape_size(self.shape_precomputed)
        util.check_tensor_shape_size(self.shape_distances)

        util.check_dtype_rule(self.dtype_queries, check_list)
        util.check_dtype_rule(self.dtype_coarse_centroids, check_list)
        util.check_dtype_rule(self.dtype_precomputed, check_list)
        util.check_dtype_rule(self.dtype_distances, check_list)

        util.check_kernel_name(self.kernel_name)

        # 16-alignment is required by the cube unit's 16x16 fractal layout
        if self.dim % 16 != 0:
            raise RuntimeError("feature dim must be a multiple of 16")
        if self.coarse_centroids_num % 16 != 0:
            raise RuntimeError("coarse_centroids num must be a multiple of 16")

    def distance_compute_each_loop(self, aicore_move_offset,
                                   aicore_coarse_centroids_num, move_offset,
                                   move_num):
        """Process one tile of ``move_num`` queries (starting at
        ``move_offset``) against this core's centroid range.

        Computes ||q||^2 per query on the vector unit, stages the query
        tile into L0B for the cube unit, then iterates over centroid
        tiles via ``cube_compute_each_loop``.
        """
        # move queries from out to UB
        queries_ub_fp16 = self.tik_instance.Tensor("float16",
                                                   (move_num, self.dim),
                                                   name="queries_ub_fp16",
                                                   scope=tik.scope_ubuf)
        self.tik_instance.data_move(queries_ub_fp16,
                                    self.input_queries_gm[move_offset, 0], 0,
                                    1, move_num * self.dim // 16, 0, 0)

        # queries do conv from fp16 to fp32
        # (split into full-255-repeat loops, a remainder repeat, and a
        # tail smaller than one vector mask)
        queries_ub = self.tik_instance.Tensor("float32", (move_num, self.dim),
                                              name="queries_ub",
                                              scope=tik.scope_ubuf)
        vconv_loop = (move_num * self.dim) // (self.fp32_vector_mask_max * 255)
        vconv_offset = self.tik_instance.Scalar(dtype="int32")
        vconv_offset.set_as(0)
        if vconv_loop > 0:
            with self.tik_instance.for_range(0, vconv_loop) as conv_index:
                vconv_offset.set_as(conv_index * self.fp32_vector_mask_max *
                                    255)
                self.tik_instance.vconv(
                    self.fp32_vector_mask_max, "none",
                    queries_ub[vconv_offset // self.dim,
                               vconv_offset % self.dim],
                    queries_ub_fp16[vconv_offset // self.dim,
                                    vconv_offset % self.dim], 255, 1, 1, 8, 4)
            # advance past the elements the full loops consumed
            vconv_offset.set_as(vconv_offset + self.fp32_vector_mask_max * 255)

        vconv_repeat_time = (move_num * self.dim) % \
            (self.fp32_vector_mask_max * 255) // self.fp32_vector_mask_max
        if vconv_repeat_time > 0:
            self.tik_instance.vconv(
                self.fp32_vector_mask_max, "none",
                queries_ub[vconv_offset // self.dim, vconv_offset % self.dim],
                queries_ub_fp16[vconv_offset // self.dim,
                                vconv_offset % self.dim], vconv_repeat_time, 1,
                1, 8, 4)
            vconv_offset.set_as(vconv_offset +
                                vconv_repeat_time * self.fp32_vector_mask_max)

        vconv_last_num = (move_num * self.dim) % self.fp32_vector_mask_max
        if vconv_last_num > 0:
            self.tik_instance.vconv(
                vconv_last_num, "none", queries_ub[vconv_offset // self.dim,
                                                   vconv_offset % self.dim],
                queries_ub_fp16[vconv_offset // self.dim,
                                vconv_offset % self.dim], 1, 1, 1, 8, 4)

        # create vcadd tensor of queries (partial-sum scratch) and set zero
        queries_vcadd_ub = self.tik_instance.Tensor("float32", (move_num, 16),
                                                    name="queries_vcadd_ub",
                                                    scope=tik.scope_ubuf)
        dup_repeat_time = (move_num * 16) // self.fp32_vector_mask_max
        dup_offset = 0
        if dup_repeat_time > 0:
            self.tik_instance.vector_dup(self.fp32_vector_mask_max,
                                         queries_vcadd_ub[0], self.zero,
                                         dup_repeat_time, 1, 8)
            dup_offset += dup_repeat_time * self.fp32_vector_mask_max

        dup_last_num = (move_num * 16) % self.fp32_vector_mask_max
        if dup_last_num > 0:
            self.tik_instance.vector_dup(
                dup_last_num, queries_vcadd_ub[dup_offset // 16,
                                               dup_offset % 16], self.zero, 1,
                1, 8)

        # move queries from out to L1, rearranged into the
        # (dim/16, ceil(move_num/16), 16, 16) fractal layout the cube needs
        shape_queries_l1 = (self.dim // 16, (move_num + 15) // 16, 16, 16)
        queries_l1 = self.tik_instance.Tensor("float16",
                                              shape_queries_l1,
                                              name="queries_l1",
                                              scope=tik.scope_cbuf)
        with self.tik_instance.for_range(0, self.dim // 16) as i:
            self.tik_instance.data_move(
                queries_l1[i, 0, 0, 0], self.input_queries_gm[move_offset,
                                                              i * 16], 0,
                move_num, 1, self.dim // 16 - 1, 0)

        # move queries from L1 to L0-B
        queries_l0b = self.tik_instance.Tensor("float16",
                                               shape_queries_l1,
                                               name="queries_l0b",
                                               scope=tik.scope_cb)
        self.tik_instance.load2dv1(queries_l0b[0], queries_l1[0], 0,
                                   (self.dim // 16) * ((move_num + 15) // 16),
                                   1, 0)

        # compute L2 norm (||q||^2) of queries
        # do vmul: square every element in place
        vmul_loop = (move_num * self.dim) // (self.fp32_vector_mask_max * 255)
        vmul_offset = self.tik_instance.Scalar(dtype="int32")
        vmul_offset.set_as(0)
        if vmul_loop > 0:
            with self.tik_instance.for_range(0, vmul_loop) as mul_index:
                vmul_offset.set_as(mul_index * self.fp32_vector_mask_max * 255)
                self.tik_instance.vmul(
                    self.fp32_vector_mask_max,
                    queries_ub[vmul_offset // self.dim,
                               vmul_offset % self.dim],
                    queries_ub[vmul_offset // self.dim,
                               vmul_offset % self.dim],
                    queries_ub[vmul_offset // self.dim,
                               vmul_offset % self.dim], 255, 1, 1, 1, 8, 8, 8)
            vmul_offset.set_as(vmul_offset + self.fp32_vector_mask_max * 255)

        vmul_repeat_time = (move_num * self.dim) % \
            (self.fp32_vector_mask_max * 255) // self.fp32_vector_mask_max
        if vmul_repeat_time > 0:
            self.tik_instance.vmul(
                self.fp32_vector_mask_max, queries_ub[vmul_offset // self.dim,
                                                      vmul_offset % self.dim],
                queries_ub[vmul_offset // self.dim, vmul_offset % self.dim],
                queries_ub[vmul_offset // self.dim, vmul_offset % self.dim],
                vmul_repeat_time, 1, 1, 1, 8, 8, 8)
            vmul_offset.set_as(vmul_offset +
                               vmul_repeat_time * self.fp32_vector_mask_max)

        vmul_last_num = (move_num * self.dim) % self.fp32_vector_mask_max
        if vmul_last_num > 0:
            self.tik_instance.vmul(
                vmul_last_num, queries_ub[vmul_offset // self.dim,
                                          vmul_offset % self.dim],
                queries_ub[vmul_offset // self.dim, vmul_offset % self.dim],
                queries_ub[vmul_offset // self.dim,
                           vmul_offset % self.dim], 1, 1, 1, 1, 8, 8, 8)

        # do vcadd: reduce each query's squared row into up-to-16 partial sums
        vcadd_repeat_time = self.dim // self.fp32_vector_mask_max
        vcadd_offset = 0
        if vcadd_repeat_time > 0:
            with self.tik_instance.for_range(0, move_num) as loop_num:
                self.tik_instance.vcadd(self.fp32_vector_mask_max,
                                        queries_vcadd_ub[loop_num, 0],
                                        queries_ub[loop_num, 0],
                                        vcadd_repeat_time, 1, 1, 8)
            vcadd_offset += vcadd_repeat_time * self.fp32_vector_mask_max

        vcadd_last_num = self.dim % self.fp32_vector_mask_max
        if vcadd_last_num > 0:
            with self.tik_instance.for_range(0, move_num) as loop_num:
                self.tik_instance.vcadd(
                    vcadd_last_num, queries_vcadd_ub[loop_num,
                                                     vcadd_repeat_time],
                    queries_ub[loop_num, vcadd_offset], 1, 1, 1, 8)

        # do vcadd again: fold the 16 partial sums into one ||q||^2 per query
        queries_l2_ub = self.tik_instance.Tensor("float32", (move_num, ),
                                                 name="queries_l2_ub",
                                                 scope=tik.scope_ubuf)
        self.tik_instance.vcadd(16, queries_l2_ub[0], queries_vcadd_ub[0],
                                move_num, 1, 1, 2)

        # dup queries_l2 to self.coarse_centroids_num_each_loop dim so it
        # can be added element-wise against a centroid tile later
        single_query_l2 = self.tik_instance.Scalar('float32')
        queries_l2_dup_ub = self.tik_instance.Tensor(
            "float32", (move_num, self.coarse_centroids_num_each_loop),
            name="queries_l2_dup_ub",
            scope=tik.scope_ubuf)
        dup_repeat_time = self.coarse_centroids_num_each_loop // \
            self.fp32_vector_mask_max
        dup_offset = 0
        if dup_repeat_time > 0:
            with self.tik_instance.for_range(0, move_num) as loop_num:
                single_query_l2.set_as(queries_l2_ub[loop_num])
                self.tik_instance.vector_dup(self.fp32_vector_mask_max,
                                             queries_l2_dup_ub[loop_num, 0],
                                             single_query_l2, dup_repeat_time,
                                             1, 8)
            dup_offset += dup_repeat_time * self.fp32_vector_mask_max

        dup_last_num = self.coarse_centroids_num_each_loop % \
            self.fp32_vector_mask_max
        if dup_last_num > 0:
            with self.tik_instance.for_range(0, move_num) as loop_num:
                single_query_l2.set_as(queries_l2_ub[loop_num])
                self.tik_instance.vector_dup(
                    dup_last_num, queries_l2_dup_ub[loop_num, dup_offset],
                    single_query_l2, 1, 1, 8)

        # compute x.y using cube unit, one centroid tile per iteration;
        # double-buffer (thread_num=2) when there is more than one tile
        coarse_centroids_loop_time = aicore_coarse_centroids_num // \
            self.coarse_centroids_num_each_loop
        coarse_centroids_move_offset = self.tik_instance.Scalar(dtype="int32")
        coarse_centroids_move_offset.set_as(0)
        if coarse_centroids_loop_time > 0:
            thread_num_need = 2 if coarse_centroids_loop_time > 1 else 1
            with self.tik_instance.for_range(
                    0, coarse_centroids_loop_time,
                    thread_num=thread_num_need) as loop_coarse_centroids:
                coarse_centroids_move_offset.set_as(
                    loop_coarse_centroids *
                    self.coarse_centroids_num_each_loop)
                self.cube_compute_each_loop(
                    queries_l0b, queries_l2_dup_ub, aicore_move_offset,
                    coarse_centroids_move_offset,
                    self.coarse_centroids_num_each_loop, move_offset, move_num)
            coarse_centroids_move_offset.set_as(
                coarse_centroids_move_offset +
                self.coarse_centroids_num_each_loop)

        # tail tile smaller than a full centroid loop
        coarse_centroids_last_num = aicore_coarse_centroids_num % \
            self.coarse_centroids_num_each_loop
        if coarse_centroids_last_num > 0:
            self.cube_compute_each_loop(queries_l0b, queries_l2_dup_ub,
                                        aicore_move_offset,
                                        coarse_centroids_move_offset,
                                        coarse_centroids_last_num, move_offset,
                                        move_num)

    def cube_compute_each_loop(self, queries_l0b, queries_l2_dup_ub,
                               aicore_move_offset,
                               coarse_centroids_move_offset,
                               coarse_centroids_move_num, queries_move_offset,
                               queries_move_num):
        """Compute distances for one centroid tile against the staged
        query tile and write the fp16 results to global memory.

        ``queries_l0b`` holds the query tile in L0B; ``queries_l2_dup_ub``
        holds each query's ||q||^2 broadcast across the tile width. The
        remaining arguments are element offsets/counts into the centroid
        range and the query batch.
        """
        queries_block_num = (queries_move_num + 15) // 16
        # move coarse centroids L2 distance from out to UB
        coarse_centroids_l2_ub_fp16 = self.tik_instance.Tensor(
            "float16", (coarse_centroids_move_num, ),
            name="coarse_centroids_l2_ub_fp16",
            scope=tik.scope_ubuf)
        self.tik_instance.data_move(
            coarse_centroids_l2_ub_fp16[0],
            self.input_precomputed_gm[aicore_move_offset +
                                      coarse_centroids_move_offset], 0, 1,
            coarse_centroids_move_num // 16, 0, 0)

        # coarse_centroids_l2 do conv from fp16 to fp32
        coarse_centroids_l2_ub = self.tik_instance.Tensor(
            "float32", (coarse_centroids_move_num, ),
            name="coarse_centroids_l2_ub",
            scope=tik.scope_ubuf)
        vconv_repeat_time = coarse_centroids_move_num // \
            self.fp32_vector_mask_max
        vconv_offset = 0
        if vconv_repeat_time > 0:
            self.tik_instance.vconv(self.fp32_vector_mask_max, "none",
                                    coarse_centroids_l2_ub[vconv_offset],
                                    coarse_centroids_l2_ub_fp16[vconv_offset],
                                    vconv_repeat_time, 1, 1, 8, 4)
            vconv_offset += vconv_repeat_time * self.fp32_vector_mask_max

        vconv_last_num = coarse_centroids_move_num % self.fp32_vector_mask_max
        if vconv_last_num > 0:
            self.tik_instance.vconv(vconv_last_num, "none",
                                    coarse_centroids_l2_ub[vconv_offset],
                                    coarse_centroids_l2_ub_fp16[vconv_offset],
                                    1, 1, 1, 8, 4)

        # move coarse centroids from out to L0A
        # NOTE(review): assumes input_coarse_centroids_gm is already in the
        # 4-D fractal layout (n/16, dim/16, 16, 16) -- confirm with caller
        coarse_centroids_l0a = self.tik_instance.Tensor(
            "float16",
            (coarse_centroids_move_num // 16, self.dim // 16, 16, 16),
            name="coarse_centroids_l0a",
            scope=tik.scope_ca)
        self.tik_instance.load2dv1(
            coarse_centroids_l0a[0],
            self.input_coarse_centroids_gm[(aicore_move_offset +
                                            coarse_centroids_move_offset) //
                                           16, 0, 0, 0], 0,
            coarse_centroids_move_num * self.dim // 256, 1, 0)

        inner_product_l0c = self.tik_instance.Tensor(
            "float32",
            (queries_block_num, coarse_centroids_move_num // 16, 16, 16),
            name="inner_product_l0c",
            scope=tik.scope_cc)
        inner_product_fp32_ub = self.tik_instance.Tensor(
            "float32",
            (queries_block_num, coarse_centroids_move_num // 16, 16, 16),
            name="inner_product_fp32_ub",
            scope=tik.scope_ubuf)
        inner_product_ub = self.tik_instance.Tensor(
            "float16",
            (queries_block_num, coarse_centroids_move_num // 16, 16, 16),
            name="inner_product_ub",
            scope=tik.scope_ubuf)

        # do mmad: centroids (L0A) x queries (L0B) -> inner products (L0C)
        self.tik_instance.mmad(inner_product_l0c[0], coarse_centroids_l0a[0],
                               queries_l0b[0], coarse_centroids_move_num,
                               self.dim, queries_block_num * 16, 0)

        # move inner_product from L0C to UB
        self.tik_instance.data_move(
            inner_product_fp32_ub[0], inner_product_l0c[0], 0, 1,
            (queries_block_num * coarse_centroids_move_num) // 16, 0, 0)

        # inner_product do conv from fp32 to fp16 and transpose
        # (the 4-D index arithmetic below decomposes the flat offset back
        # into (block, sub-block, row, col) coordinates)
        vconv_loop = (queries_block_num * (coarse_centroids_move_num // 16) *
                      256) // (self.fp32_vector_mask_max * 255)
        vconv_offset = self.tik_instance.Scalar(dtype="int32")
        vconv_offset.set_as(0)
        if vconv_loop > 0:
            with self.tik_instance.for_range(0, vconv_loop) as conv_index:
                vconv_offset.set_as(conv_index *
                                    (self.fp32_vector_mask_max * 255))
                self.tik_instance.vconv(
                    self.fp32_vector_mask_max, "none", inner_product_ub[
                        vconv_offset //
                        ((coarse_centroids_move_num // 16) * 256),
                        vconv_offset %
                        ((coarse_centroids_move_num // 16) * 256) // 256,
                        vconv_offset %
                        ((coarse_centroids_move_num // 16) * 256) % 256 // 16,
                        vconv_offset %
                        ((coarse_centroids_move_num // 16) * 256) % 256 % 16],
                    inner_product_fp32_ub[
                        vconv_offset //
                        ((coarse_centroids_move_num // 16) * 256),
                        vconv_offset %
                        ((coarse_centroids_move_num // 16) * 256) // 256,
                        vconv_offset %
                        ((coarse_centroids_move_num // 16) * 256) % 256 // 16,
                        vconv_offset %
                        ((coarse_centroids_move_num // 16) * 256) % 256 % 16],
                    255, 1, 1, 4, 8)
            vconv_offset.set_as(vconv_offset + self.fp32_vector_mask_max * 255)

        vconv_repeat_time = (queries_block_num *
                             (coarse_centroids_move_num // 16) * 256) % \
            (self.fp32_vector_mask_max * 255) // self.fp32_vector_mask_max
        if vconv_repeat_time > 0:
            self.tik_instance.vconv(
                self.fp32_vector_mask_max, "none",
                inner_product_ub[vconv_offset //
                                 ((coarse_centroids_move_num // 16) * 256),
                                 vconv_offset %
                                 ((coarse_centroids_move_num // 16) * 256) //
                                 256, vconv_offset %
                                 ((coarse_centroids_move_num // 16) * 256) %
                                 256 // 16, vconv_offset %
                                 ((coarse_centroids_move_num // 16) * 256) %
                                 256 % 16],
                inner_product_fp32_ub[
                    vconv_offset //
                    ((coarse_centroids_move_num // 16) * 256), vconv_offset %
                    ((coarse_centroids_move_num // 16) * 256) // 256,
                    vconv_offset % ((coarse_centroids_move_num // 16) * 256) %
                    256 // 16, vconv_offset %
                    ((coarse_centroids_move_num // 16) * 256) % 256 % 16],
                vconv_repeat_time, 1, 1, 4, 8)

        # transpose each 16x16 sub-block in place so a single query's
        # inner products become contiguous rows
        with self.tik_instance.for_range(0, queries_block_num) as i:
            with self.tik_instance.for_range(0, coarse_centroids_move_num //
                                             16) as j:
                self.tik_instance.vtranspose(inner_product_ub[i, j, 0, 0],
                                             inner_product_ub[i, j, 0, 0])

        # compute distance each query; double-buffer across queries
        thread_num_need = 2 if queries_move_num > 1 else 1
        with self.tik_instance.for_range(
                0, queries_move_num,
                thread_num=thread_num_need) as loop_queries:
            # compute x2+y2
            add_ub = self.tik_instance.Tensor("float32",
                                              (coarse_centroids_move_num, ),
                                              name="add_ub",
                                              scope=tik.scope_ubuf)
            vadd_repeat_time = coarse_centroids_move_num // \
                self.fp32_vector_mask_max
            vadd_offset = 0
            if vadd_repeat_time > 0:
                self.tik_instance.vadd(self.fp32_vector_mask_max, add_ub[0],
                                       queries_l2_dup_ub[loop_queries, 0],
                                       coarse_centroids_l2_ub[0],
                                       vadd_repeat_time, 1, 1, 1, 8, 8, 8)
                vadd_offset += vadd_repeat_time * self.fp32_vector_mask_max

            vadd_last_num = coarse_centroids_move_num % \
                self.fp32_vector_mask_max
            if vadd_last_num > 0:
                self.tik_instance.vadd(
                    vadd_last_num, add_ub[vadd_offset],
                    queries_l2_dup_ub[loop_queries, vadd_offset],
                    coarse_centroids_l2_ub[vadd_offset], 1, 1, 1, 1, 8, 8, 8)

            # rearrange inner_product in ub: gather this query's row out of
            # every transposed 16x16 sub-block (stride 15 skips other rows)
            single_inner_product_ub_fp16 = self.tik_instance.Tensor(
                "float16", (coarse_centroids_move_num, ),
                name="single_inner_product_ub_fp16",
                scope=tik.scope_ubuf)
            self.tik_instance.data_move(
                single_inner_product_ub_fp16[0],
                inner_product_ub[loop_queries // 16, 0, loop_queries % 16, 0],
                0, coarse_centroids_move_num // 16, 1, 15, 0)

            # inner_product do conv from fp16 to fp32
            single_inner_product_ub = self.tik_instance.Tensor(
                "float32", (coarse_centroids_move_num, ),
                name="single_inner_product_ub",
                scope=tik.scope_ubuf)
            vconv_repeat_time = coarse_centroids_move_num // \
                self.fp32_vector_mask_max
            vconv_offset = 0
            if vconv_repeat_time > 0:
                self.tik_instance.vconv(
                    self.fp32_vector_mask_max, "none",
                    single_inner_product_ub[vconv_offset],
                    single_inner_product_ub_fp16[vconv_offset],
                    vconv_repeat_time, 1, 1, 8, 4)
                vconv_offset += vconv_repeat_time * self.fp32_vector_mask_max

            vconv_last_num = coarse_centroids_move_num % \
                self.fp32_vector_mask_max
            if vconv_last_num > 0:
                self.tik_instance.vconv(
                    vconv_last_num, "none",
                    single_inner_product_ub[vconv_offset],
                    single_inner_product_ub_fp16[vconv_offset], 1, 1, 1, 8, 4)

            # compute distance using vaxpy: add_ub += coeff(-2) * x.y
            vaxpy_repeat_time = coarse_centroids_move_num // \
                self.fp32_vector_mask_max
            vaxpy_offset = 0
            if vaxpy_repeat_time > 0:
                self.tik_instance.vaxpy(self.fp32_vector_mask_max, add_ub[0],
                                        single_inner_product_ub[0], self.coeff,
                                        vaxpy_repeat_time, 1, 1, 8, 8)
                vaxpy_offset += vaxpy_repeat_time * self.fp32_vector_mask_max

            vaxpy_last_num = coarse_centroids_move_num % \
                self.fp32_vector_mask_max
            if vaxpy_last_num > 0:
                self.tik_instance.vaxpy(vaxpy_last_num, add_ub[vaxpy_offset],
                                        single_inner_product_ub[vaxpy_offset],
                                        self.coeff, 1, 1, 1, 8, 8)

            # distances do conv from fp32 to fp16
            dst_ub = self.tik_instance.Tensor("float16",
                                              (coarse_centroids_move_num, ),
                                              name="dst_ub",
                                              scope=tik.scope_ubuf)
            vconv_repeat_time = coarse_centroids_move_num // \
                self.fp32_vector_mask_max
            vconv_offset = 0
            if vconv_repeat_time > 0:
                self.tik_instance.vconv(self.fp32_vector_mask_max, "none",
                                        dst_ub[vconv_offset],
                                        add_ub[vconv_offset],
                                        vconv_repeat_time, 1, 1, 4, 8)
                vconv_offset += vconv_repeat_time * self.fp32_vector_mask_max

            vconv_last_num = coarse_centroids_move_num % \
                self.fp32_vector_mask_max
            if vconv_last_num > 0:
                self.tik_instance.vconv(vconv_last_num, "none",
                                        dst_ub[vconv_offset],
                                        add_ub[vconv_offset], 1, 1, 1, 4, 8)

            # move distance from UB to out
            self.tik_instance.data_move(
                self.output_distances_gm[queries_move_offset + loop_queries,
                                         aicore_move_offset +
                                         coarse_centroids_move_offset],
                dst_ub[0], 0, 1, coarse_centroids_move_num // 16, 0, 0)

    def forward(self):
        """Build the multi-core kernel: tile queries per core, emit the
        distance computation, then compile with BuildCCE.

        Returns the built ``tik_instance``.
        """
        aicore_move_offset = self.tik_instance.Scalar(dtype="int32")
        aicore_move_offset.set_as(0)
        with self.tik_instance.for_range(
                0, self.aicore_use, block_num=self.aicore_use) as block_index:
            # compute coarse centroids num and move offset every core
            aicore_move_offset.set_as(block_index *
                                      self.coarse_centroids_num_each_core)
            # NOTE(review): these are trace-time Python assignments inside
            # TIK if_scope/else_scope -- both branches run during tracing,
            # so every core ends up with the last-core count. This is only
            # harmless when each-core and last-core counts are equal
            # (centroid num a multiple of 32); confirm intent, a TIK Scalar
            # with set_as would be needed for per-core values.
            with self.tik_instance.if_scope(
                    block_index != self.aicore_use - 1):
                aicore_coarse_centroids_num = \
                    self.coarse_centroids_num_each_core
            with self.tik_instance.else_scope():
                aicore_coarse_centroids_num = \
                    self.coarse_centroids_num_last_core

            # compute distance: full query tiles, then the tail
            queries_loop_time = self.queries_num // self.queries_num_each_loop
            queries_move_offset = self.tik_instance.Scalar(dtype="int32")
            queries_move_offset.set_as(0)
            if queries_loop_time > 0:
                with self.tik_instance.for_range(0, queries_loop_time) \
                                                            as loop_queries:
                    queries_move_offset.set_as(loop_queries *
                                               self.queries_num_each_loop)
                    self.distance_compute_each_loop(
                        aicore_move_offset, aicore_coarse_centroids_num,
                        queries_move_offset, self.queries_num_each_loop)
                queries_move_offset.set_as(queries_move_offset +
                                           self.queries_num_each_loop)

            queries_last_num = self.queries_num % self.queries_num_each_loop
            if queries_last_num > 0:
                self.distance_compute_each_loop(aicore_move_offset,
                                                aicore_coarse_centroids_num,
                                                queries_move_offset,
                                                queries_last_num)

        self.tik_instance.BuildCCE(kernel_name=self.kernel_name,
                                   inputs=[
                                       self.input_queries_gm,
                                       self.input_coarse_centroids_gm,
                                       self.input_precomputed_gm
                                   ],
                                   outputs=[self.output_distances_gm])

        return self.tik_instance


def queries_num_compute(dim):
    """Return how many queries per loop fit the on-chip buffers for a
    given feature dimension (empirical capacity formula, floor division).
    """
    numerator = 245746 * dim - 589824
    denominator = 6 * dim * dim + 102 * dim + 229376
    return numerator // denominator


def distance_compute(input_queries,
                     input_coarse_centroids,
                     input_precomputed,
                     output_distances,
                     kernel_name="distance_compute"):
    """
    Entry point: build and return the distance-compute TIK kernel.

    Parameters
    ----------
    input_queries : dict
        shape and dtype of query vector
    input_coarse_centroids : dict
        shape and dtype of coarse centroids
    input_precomputed : dict
        shape and dtype of precomputed L2 distance of coarse centroids
    output_distances : dict
        shape and dtype of distances, should be same dtype as input_queries
    kernel_name : str
        kernel name, default value is "distance_compute"

    Returns
    -------
    the built tik_instance
    """
    compute = DistanceCompute(input_queries, input_coarse_centroids,
                              input_precomputed, output_distances,
                              kernel_name)
    return compute.forward()
