#include <iostream>
#include <vector>
#include <chrono>
#include <iomanip>
#include <string>
#include <fstream>
#include <map>
#include <dirent.h>
#include <cstring>
#include <hdf5/serial/H5Cpp.h>
#include <algorithm>
#include <numeric> 
#include <cmath>
#include <random>
#include <cuda_fp16.h>  
// ANSI terminal escape codes used to colorize status/error output.
#define RESET   "\033[0m"
#define RED     "\033[31m"
#define GREEN   "\033[32m"
// CHECK(call): evaluate a CUDA runtime call and, on any error, print the
// file/line plus the CUDA error code and message, then exit the process.
// NOTE(review): the macro expands to a bare `{ ... }` block rather than the
// usual do{...}while(0); several call sites in this file rely on that by
// omitting the trailing semicolon, so do not "fix" the macro shape without
// also updating every call site.
#define CHECK(call)\
{\
  const cudaError_t error=call;\
  if(error!=cudaSuccess)\
  {\
      printf(RED "ERROR: %s:%d," ,__FILE__,__LINE__);\
      printf("code:%d,reason:%s\n" RESET,error,cudaGetErrorString(error));\
      exit(1);\
  }\
}
// Select the CUDA device with index devNum and print its name.
// Exits the process (via CHECK) if the device cannot be queried or set.
void initDevice(int devNum)
{
  cudaDeviceProp prop;
  CHECK(cudaGetDeviceProperties(&prop, devNum));
  printf(GREEN "Using device %d: %s\n" RESET, devNum, prop.name);
  CHECK(cudaSetDevice(devNum));
}
// List the names (not full paths) of all entries in `dir` whose name ends
// in ".txt". Returns an empty vector (after perror) if the directory cannot
// be opened.
// FIX: the original used find(".txt") != npos, which also matched names that
// merely CONTAIN ".txt" (e.g. "a.txt.bak"); this version requires a true
// ".txt" suffix.
std::vector<std::string> get_files_in_directory(const std::string& dir) {
    std::vector<std::string> files;
    DIR* dp = opendir(dir.c_str());
    if (dp == NULL) {
        perror("opendir");
        return files;
    }
    const std::string ext = ".txt";
    struct dirent* entry;
    while ((entry = readdir(dp)) != NULL) {
        std::string filename = entry->d_name;
        // Accept only names that end with ".txt".
        if (filename.size() >= ext.size() &&
            filename.compare(filename.size() - ext.size(), ext.size(), ext) == 0) {
            files.push_back(filename);
        }
    }
    closedir(dp);
    return files;
}

// Read whitespace-separated float values from `filepath` and return them
// converted to half precision. Prints an error to stderr and returns an
// empty vector if the file cannot be opened.
std::vector<half> read_param(const std::string& filepath) {
    std::vector<half> values;
    std::ifstream in(filepath);
    if (!in.is_open()) {
        std::cerr << "Unable to open file: " << filepath << std::endl;
        return values;
    }
    // Stream extraction stops at the first non-numeric token or EOF.
    for (float v; in >> v; ) {
        values.push_back(__float2half(v));
    }
    return values;
}

// Load every ".txt" parameter file found under `dir` into a map keyed by the
// file name with its extension stripped (e.g. "feat.stn.conv1.weight").
std::map<std::string, std::vector<half>> read_params(const std::string& dir) {
    std::map<std::string, std::vector<half>> params;
    for (const auto& file : get_files_in_directory(dir)) {
        // Key = file name up to (not including) the last '.'.
        const std::string key = file.substr(0, file.find_last_of('.'));
        params[key] = read_param(dir + "/" + file);
    }
    return params;
}

using namespace H5;
// Read an HDF5 point-cloud file: for each top-level group, load its
// "points" dataset (dims[0] x dims[1] floats) converted to half precision
// into `list_of_points_half`, and its integer "label" attribute into
// `list_of_labels`. HDF5 exceptions are printed; outputs keep whatever was
// read before the failure.
// FIX: the original also accumulated an unused float copy of every cloud
// (local `list_of_points`), wasting memory proportional to the dataset.
void read_h5_file(const std::string& file_path, std::vector<std::vector<half>>& list_of_points_half, std::vector<int>& list_of_labels) {
    try {
        H5File file(file_path, H5F_ACC_RDONLY);
        // Collect group names first so the iteration order is fixed.
        std::vector<std::string> dataset_names;
        hsize_t num_objs = file.getNumObjs();
        for (hsize_t i = 0; i < num_objs; i++) {
            dataset_names.push_back(file.getObjnameByIdx(i));
        }
        for (const auto& name : dataset_names) {
            DataSet dataset = file.openDataSet(name + "/points");
            DataSpace dataspace = dataset.getSpace();
            hsize_t dims[2];
            dataspace.getSimpleExtentDims(dims, NULL);
            std::vector<float> points(dims[0] * dims[1]);
            dataset.read(points.data(), PredType::NATIVE_FLOAT);
            // Convert to half immediately; the float buffer is then discarded.
            std::vector<half> points_half(points.size());
            for (size_t j = 0; j < points.size(); ++j) {
                points_half[j] = half(points[j]);
            }
            list_of_points_half.push_back(std::move(points_half));

            // Per-group scalar "label" attribute.
            Attribute label_attr = file.openGroup(name).openAttribute("label");
            int label;
            label_attr.read(PredType::NATIVE_INT, &label);
            list_of_labels.push_back(label);
        }
    } catch (FileIException& error) {
        error.printErrorStack();
    } catch (DataSetIException& error) {
        error.printErrorStack();
    } catch (DataSpaceIException& error) {
        error.printErrorStack();
    } catch (DataTypeIException& error) {
        error.printErrorStack();
    }
}
//todo conv_bn_relu
// Fused 1x1 conv + inference batch-norm + ReLU.
// Indexing implies one thread per output element with threadIdx.x = output
// channel, blockIdx.x = point index, blockIdx.y = batch index — inferred
// from the index arithmetic; confirm against the launch configuration.
// FIX: accumulate the dot product in a register instead of read-modify-write
// on global memory every iteration (same __hfma sequence, same results, far
// fewer global transactions).
__global__ void conv_bn_relu(half* point_in, half* point_out, half* weight, half* bias,
                             int in_channls, half* mean, half* var, half* gamma, half* beta) {
    int out_idx = (threadIdx.x + blockIdx.x * blockDim.x) + blockIdx.y * blockDim.x * gridDim.x;
    half acc = half(0);
    for (int i = 0; i < in_channls; i++) {
        int in_idx = (i + blockIdx.x * in_channls) + blockIdx.y * in_channls * gridDim.x;
        int weight_idx = i + threadIdx.x * in_channls;
        acc = __hfma(point_in[in_idx], weight[weight_idx], acc);
    }
    acc = acc + bias[threadIdx.x];
    // Inference BN: (x - mean) / sqrt(var + eps), then scale/shift.
    acc = (acc - mean[threadIdx.x]) / hsqrt(var[threadIdx.x] + half(1e-5f));
    half result = __hfma(acc, gamma[threadIdx.x], beta[threadIdx.x]);
    // ReLU.
    point_out[out_idx] = result > half(0) ? result : half(0);
}
//todo conv_bn
// Fused 1x1 conv + inference batch-norm (no activation).
// Same indexing scheme as conv_bn_relu: threadIdx.x = output channel,
// blockIdx.x = point, blockIdx.y = batch — inferred from the index
// arithmetic; confirm against the launch configuration.
// FIX: accumulate in a register instead of read-modify-write on global
// memory every loop iteration (same multiply-then-add sequence as the
// original, so results are unchanged).
__global__ void conv_bn(half* point_in, half* point_out, half* weight, half* bias,
                        int in_channls, half* mean, half* var, half* gamma, half* beta) {
    int out_idx = (threadIdx.x + blockIdx.x * blockDim.x) + blockIdx.y * blockDim.x * gridDim.x;
    half acc = half(0);
    for (int i = 0; i < in_channls; i++) {
        int in_idx = (i + blockIdx.x * in_channls) + blockIdx.y * in_channls * gridDim.x;
        int weight_idx = i + threadIdx.x * in_channls;
        acc = acc + point_in[in_idx] * weight[weight_idx];
    }
    acc = acc + bias[threadIdx.x];
    // Inference BN: (x - mean) / sqrt(var + eps), then scale/shift.
    acc = (acc - mean[threadIdx.x]) / hsqrt(var[threadIdx.x] + half(1e-5f));
    point_out[out_idx] = __hfma(acc, gamma[threadIdx.x], beta[threadIdx.x]);
}
//todo my_max
// Channel-wise max over num_point points per batch element.
// Indexing implies threadIdx.x = channel, blockIdx.x = batch, with the input
// laid out point-major within a batch — inferred from the index arithmetic;
// confirm against the launch configuration. Requires num_point >= 1.
// FIX: seed the running max with the first point instead of the magic
// sentinel -100, so inputs below -100 are handled correctly; also keep the
// running max in a register rather than global memory.
__global__ void my_max(half* point_in, half* point_out, int num_point) {
    int out_idx = threadIdx.x + blockIdx.x * blockDim.x;
    // First point of this (batch, channel) pair: i == 0 in the original loop.
    half best = point_in[threadIdx.x + blockIdx.x * num_point * blockDim.x];
    for (int i = 1; i < num_point; i++) {
        int in_idx = threadIdx.x + (i + blockIdx.x * num_point) * blockDim.x;
        half v = point_in[in_idx];
        best = v > best ? v : best;
    }
    point_out[out_idx] = best;
}
//todo fc.bn.relu
// Fully-connected layer + inference batch-norm + ReLU.
// Indexing implies threadIdx.x = output channel, blockIdx.x = batch index —
// inferred from the index arithmetic; confirm against the launch config.
// FIX: accumulate in a register instead of read-modify-write on global
// memory every iteration (identical arithmetic sequence, same results).
__global__ void fc_bn_relu(half* point_in, half* point_out,
                           half* weight, half* bias, int in_channls, int out_channls,
                           half* mean, half* var, half* gamma, half* beta) {
    int out_idx = threadIdx.x + blockIdx.x * blockDim.x;
    half acc = half(0);
    for (int i = 0; i < in_channls; i++) {
        int in_idx = i + blockIdx.x * in_channls;
        int weight_idx = i + threadIdx.x * in_channls;
        acc = acc + point_in[in_idx] * weight[weight_idx];
    }
    acc = acc + bias[threadIdx.x];
    // Inference BN: (x - mean) / sqrt(var + eps), then scale/shift.
    acc = (acc - mean[threadIdx.x]) / hsqrt(var[threadIdx.x] + half(1e-5f));
    half result = __hfma(acc, gamma[threadIdx.x], beta[threadIdx.x]);
    // ReLU.
    point_out[out_idx] = result > half(0) ? result : half(0);
}
//todo fc_eye
// Fully-connected layer producing a per-batch 3x3 matrix, with the 3x3
// identity added (the STN's "+ eye(3)"). Indexing implies threadIdx.x =
// output element, blockIdx.x = batch index — inferred from the index
// arithmetic; confirm against the launch configuration.
// FIX: accumulate in a register instead of read-modify-write on global
// memory every iteration (identical arithmetic sequence, same results).
__global__ void fc_eye(half* point_in, half* point_out,
                       half* weight, half* bias, int in_channls, int out_channls) {
    int out_idx = threadIdx.x + blockIdx.x * blockDim.x;
    half acc = half(0);
    for (int i = 0; i < in_channls; i++) {
        int in_idx = i + blockIdx.x * in_channls;
        int weight_idx = i + threadIdx.x * in_channls;
        acc = acc + point_in[in_idx] * weight[weight_idx];
    }
    acc = acc + bias[threadIdx.x];
    // Diagonal entries of each batch's flattened 3x3 matrix sit at local
    // offsets 0, 4, 8 — i.e. multiples of 4.
    point_out[out_idx] = ((out_idx - blockIdx.x * 3 * 3) % 4 == 0) ? acc + __int2half_rd(1) : acc;
}
//todo feat.fstn.fc3
// Fully-connected layer producing a per-batch 64x64 matrix with the 64x64
// identity added (the feature STN's "+ eye(64)"). Indexing implies the
// output channel is threadIdx.x + blockIdx.x*blockDim.x and blockIdx.y is
// the batch index — inferred from the index arithmetic; confirm against the
// launch configuration.
// FIX: accumulate in a register instead of read-modify-write on global
// memory every iteration (identical arithmetic sequence, same results).
__global__ void feat_fstn_fc3(half* point_in, half* point_out,
                              half* weight, half* bias, int in_channls) {
    // Output channel within this batch's 64x64 matrix.
    int out_channel = threadIdx.x + blockIdx.x * blockDim.x;
    int out_idx = out_channel + blockIdx.y * (blockDim.x * gridDim.x);
    half acc = half(0);
    for (int i = 0; i < in_channls; i++) {
        int in_idx = i + blockIdx.y * in_channls;
        int weight_idx = i + out_channel * in_channls;
        acc = acc + point_in[in_idx] * weight[weight_idx];
    }
    acc = acc + bias[out_channel];
    // Diagonal entries of each batch's flattened 64x64 matrix sit at local
    // offsets that are multiples of 65.
    point_out[out_idx] = ((out_idx - blockIdx.y * 64 * 64) % 65 == 0) ? acc + __int2half_rd(1) : acc;
}
//todo fc
// Plain fully-connected layer (matrix-vector product + bias). Indexing
// implies threadIdx.x = output channel, blockIdx.x = batch index — inferred
// from the index arithmetic; confirm against the launch configuration.
// FIX: accumulate in a register instead of read-modify-write on global
// memory every iteration (identical arithmetic sequence, same results).
__global__ void fc(half* point_in, half* point_out,
                   half* weight, half* bias, int in_channls, int out_channls) {
    int out_idx = threadIdx.x + blockIdx.x * blockDim.x;
    half acc = half(0);
    for (int i = 0; i < in_channls; i++) {
        int in_idx = i + blockIdx.x * in_channls;
        int weight_idx = i + threadIdx.x * in_channls;
        acc = acc + point_in[in_idx] * weight[weight_idx];
    }
    point_out[out_idx] = acc + bias[threadIdx.x];
}
//todo feat.bmm
// Batched matrix multiply: multiplies each point's feature row by the
// per-batch transform `trans`. Indexing implies threadIdx.x = output column,
// blockIdx.x = point, blockIdx.y = batch, with blockDim.x as the square
// transform dimension — inferred from the index arithmetic; confirm against
// the launch configuration.
// FIX: accumulate in a register instead of read-modify-write on global
// memory every iteration (identical arithmetic sequence, same results).
__global__ void feat_bmm(half* point_in, half* point_out, half* trans) {
    int out_idx = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x;
    half acc = half(0);
    for (int i = 0; i < blockDim.x; i++) {
        int in_idx = i + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x;
        int weight_idx = threadIdx.x + (i + blockIdx.y * blockDim.x) * blockDim.x;
        acc = acc + point_in[in_idx] * trans[weight_idx];
    }
    point_out[out_idx] = acc;
}
// Resample every point cloud to exactly num_point points (xyz triples):
// clouds with more points are randomly subsampled, clouds with fewer are
// padded by duplicating randomly chosen original points. Clouds that are
// empty or already the right size are left untouched.
// FIX: the padding branch originally tested `n < num_point*3` (a point
// count against 3x the target), which only worked by accident because the
// branch is reached with n <= num_point; it also made a useless full copy
// when n == num_point, and would hit UB (uniform_int_distribution with an
// empty range) on an empty cloud.
void process_points(std::vector<std::vector<half>>& list_of_points, int num_point) {
    std::random_device rd;
    std::mt19937 gen(rd());
    for (auto& points : list_of_points) {
        int n = points.size() / 3;  // number of xyz triples
        if (n > num_point) {
            // Downsample: shuffle indices and keep the first num_point.
            std::vector<int> indices(n);
            std::iota(indices.begin(), indices.end(), 0);
            std::shuffle(indices.begin(), indices.end(), gen);
            std::vector<half> new_points;
            new_points.reserve(num_point * 3);
            for (int i = 0; i < num_point; ++i) {
                int idx = indices[i] * 3;
                new_points.push_back(points[idx]);
                new_points.push_back(points[idx + 1]);
                new_points.push_back(points[idx + 2]);
            }
            points = std::move(new_points);
        } else if (n > 0 && n < num_point) {
            // Pad in place: append copies of randomly chosen original points.
            // reserve() guarantees no reallocation, so push_back of elements
            // of the same vector is safe.
            points.reserve(num_point * 3);
            std::uniform_int_distribution<> dis(0, n - 1);
            while (static_cast<int>(points.size() / 3) < num_point) {
                int idx = dis(gen) * 3;  // idx < 3n: always an original point
                points.push_back(points[idx]);
                points.push_back(points[idx + 1]);
                points.push_back(points[idx + 2]);
            }
        }
    }
}
// Host-side log-softmax followed by per-row argmax over `batch` rows of `n`
// logits each (x is row-major, batch x n). Returns the predicted class per
// row. (Argmax is invariant under log-softmax; the full computation is kept
// to mirror the model's reference output path.)
// FIX: replaced C99-style variable-length arrays (float max_val[batch] etc.)
// — a non-standard extension in C++ that can overflow the stack for large
// batches — with std::vector.
std::vector<int> log_softmax_and_argmax(half* x, int batch, int n = 10) {
    std::vector<int> result(batch);
    std::vector<float> max_val(batch, -INFINITY);
    std::vector<float> sum(batch, 0.0f);
    std::vector<float> y(static_cast<size_t>(n) * batch);
    // Row-wise max for numerical stability.
    for (int b = 0; b < batch; b++) {
        for (int i = 0; i < n; ++i) {
            max_val[b] = std::max(max_val[b], __half2float(x[i + b * n]));
        }
    }
    // Row-wise sum of shifted exponentials.
    for (int b = 0; b < batch; b++) {
        for (int i = 0; i < n; ++i) {
            sum[b] += std::exp(__half2float(x[i + b * n]) - max_val[b]);
        }
    }
    // log-softmax values.
    for (int b = 0; b < batch; b++) {
        for (int i = 0; i < n; ++i) {
            y[i + b * n] = std::log(std::exp(__half2float(x[i + b * n]) - max_val[b]) / sum[b]);
        }
    }
    // Argmax per row.
    for (int b = 0; b < batch; b++) {
        int pred = 0;
        for (int i = 1; i < n; ++i) {
            if (y[i + b * n] > y[pred + b * n]) {
                pred = i;
            }
        }
        result[b] = pred;
    }
    return result;
}
// Pack point clouds into fixed-size batches: each output batch is a flat
// buffer of batch_size * point_size halves, and the trailing partial batch
// is zero-padded. Assumes all clouds have the same size as the first one.
std::vector<std::vector<half>> reorganize_to_batches(const std::vector<std::vector<half>>& list_of_points, 
    size_t batch_size) {
    const size_t point_size = list_of_points.front().size();
    // Ceiling division: last batch may be partially filled.
    const size_t num_batches = (list_of_points.size() + batch_size - 1) / batch_size;
    std::vector<std::vector<half>> list_of_batch(num_batches, std::vector<half>(batch_size * point_size, 0));

    for (size_t batch_idx = 0; batch_idx < num_batches; ++batch_idx) {
        const size_t first = batch_idx * batch_size;
        const size_t last = std::min(first + batch_size, list_of_points.size());
        for (size_t i = first; i < last; ++i) {
            // Copy cloud i into its slot within the flat batch buffer.
            std::copy(list_of_points[i].begin(), list_of_points[i].end(),
                      list_of_batch[batch_idx].begin() + (i - first) * point_size);
        }
    }
    return list_of_batch;
}
// Pack labels into fixed-size batches; slots past the last real label in the
// trailing batch are filled with -1 (an invalid class id used as padding).
std::vector<std::vector<int>> reorganize_to_batches_labels(const std::vector<int>& list_of_labels, size_t batch_size) {
    // Ceiling division: last batch may be partially filled.
    const size_t num_batches = (list_of_labels.size() + batch_size - 1) / batch_size;
    std::vector<std::vector<int>> label_of_batch(num_batches, std::vector<int>(batch_size, -1));

    for (size_t batch_idx = 0; batch_idx < num_batches; ++batch_idx) {
        const size_t first = batch_idx * batch_size;
        const size_t last = std::min(first + batch_size, list_of_labels.size());
        std::copy(list_of_labels.begin() + first, list_of_labels.begin() + last,
                  label_of_batch[batch_idx].begin());
    }
    return label_of_batch;
}
//todo fc_softmax_log
// Fully-connected layer followed by a numerically-stable log-softmax over
// the block's blockDim.x outputs (one row per blockIdx.x; threadIdx.x is the
// output channel — inferred from the index arithmetic, confirm at launch).
// FIX: the original read and updated uninitialized __shared__ scalars
// (max_val, sum_val) from every thread with no synchronization or atomics —
// a data race producing nondeterministic garbage. Thread 0 now performs the
// (tiny, blockDim.x-element) reduction in float, published via __syncthreads.
__global__ void fc_softmax_log(half* point_in, half* point_out,
                               half* weight, half* bias, int in_channls, int out_channls) {
    int out_idx = threadIdx.x + blockIdx.x * blockDim.x;
    // FC: dot product accumulated in a register.
    half acc = half(0);
    for (int i = 0; i < in_channls; i++) {
        int in_idx = i + blockIdx.x * in_channls;
        int weight_idx = i + threadIdx.x * in_channls;
        acc = acc + point_in[in_idx] * weight[weight_idx];
    }
    acc = acc + bias[threadIdx.x];
    point_out[out_idx] = acc;
    __syncthreads();  // make every logit of this row visible to thread 0

    __shared__ float s_max;
    __shared__ float s_logsum;
    if (threadIdx.x == 0) {
        int row = blockIdx.x * blockDim.x;
        float m = -INFINITY;
        for (int j = 0; j < (int)blockDim.x; j++) {
            m = fmaxf(m, __half2float(point_out[row + j]));
        }
        float s = 0.0f;
        for (int j = 0; j < (int)blockDim.x; j++) {
            s += expf(__half2float(point_out[row + j]) - m);
        }
        s_max = m;
        s_logsum = logf(s);
    }
    __syncthreads();  // reduction results ready for all threads
    // log_softmax(x) = x - max - log(sum(exp(x - max)))
    point_out[out_idx] = __float2half(__half2float(acc) - s_max - s_logsum);
}


// Return the index of the largest of the first n entries of `array`.
// The default n = 10 (the number of classes) preserves the original
// hard-coded behavior, so existing one-argument callers are unaffected.
int findMaxIdx(half* array, int n = 10) {
    float maxVal = float(array[0]);
    int maxIdx = 0;
    for (int i = 1; i < n; ++i) {
        // Convert once; the original mixed an explicit conversion in the
        // comparison with an implicit one in the assignment.
        float v = float(array[i]);
        if (v > maxVal) {
            maxIdx = i;
            maxVal = v;
        }
    }
    return maxIdx;
}

int main(int argc, char *argv[]) {
        
    //todo read params and weight
    initDevice(0);
    auto params = read_params("./weight");
    std::string file_path = "./data/test_point_clouds.h5";
    std::vector<std::vector<half>> list_of_points;
    std::vector<int> list_of_labels;
    read_h5_file(file_path, list_of_points, list_of_labels);
    int n_points= 1<<14;
    int batch_size = 32;
    process_points(list_of_points, n_points);
    std::vector<std::vector<half>>list_batch = reorganize_to_batches(list_of_points, batch_size);
    std::vector<std::vector<int>>label_batch = reorganize_to_batches_labels(list_of_labels, batch_size);
    list_of_points.clear();
    list_of_labels.clear();
    half* result_out_gpu=(half*)malloc(batch_size*10*sizeof(half));

    //todo prepare device memory
    half* gloabl_mem1;
    half* gloabl_mem2; 
    half* gloabl_mem3; 
    CHECK(cudaMalloc((void**)&gloabl_mem1, batch_size*n_points*1024*sizeof(half)));
    CHECK(cudaMalloc((void**)&gloabl_mem2, batch_size*n_points*1024*sizeof(half)));
    CHECK(cudaMalloc((void**)&gloabl_mem3, batch_size*n_points*1024*sizeof(half)));

    
    //todo flat raw_points
    std::vector<half> flat_list_batch;
    for (const auto& feature_vector : list_batch) {
        flat_list_batch.insert(flat_list_batch.end(), feature_vector.begin(), feature_vector.end());
    }
    half* raw_points;
    CHECK(cudaMalloc((void**)&raw_points,flat_list_batch.size() * sizeof(half)));
    CHECK(cudaMemcpy(raw_points, flat_list_batch.data(), flat_list_batch.size() * sizeof(half), cudaMemcpyHostToDevice));

 
    

    //feat.stn.conv1
    int feat_stn_conv1_in_channls = 3;
    int feat_stn_conv1_out_channls=64;
    half* Dfeat_stn_conv1_weight;
    half* Dfeat_stn_conv1_bias;
    half* Dfeat_stn_conv1_output=gloabl_mem2;
    // CHECK(cudaMalloc((void**)&Dfeat_stn_conv1_output, batch_size*feat_stn_conv1_out_channls*n_points*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv1_weight, feat_stn_conv1_in_channls*feat_stn_conv1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv1_bias, feat_stn_conv1_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_stn_conv1_weight, params["feat.stn.conv1.weight"].data(), feat_stn_conv1_in_channls*feat_stn_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_conv1_bias, params["feat.stn.conv1.bias"].data(), feat_stn_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //feat.stn.bn1
    half* Dfeat_stn_bn1_mean;
    half* Dfeat_stn_bn1_var;
    half* Dfeat_stn_bn1_gamma;
    half* Dfeat_stn_bn1_beta;
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn1_mean, feat_stn_conv1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn1_var, feat_stn_conv1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn1_gamma, feat_stn_conv1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn1_beta, feat_stn_conv1_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_stn_bn1_mean, params["feat.stn.bn1.running_mean"].data(), feat_stn_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn1_var, params["feat.stn.bn1.running_var"].data(), feat_stn_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn1_gamma, params["feat.stn.bn1.weight"].data(), feat_stn_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn1_beta, params["feat.stn.bn1.bias"].data(), feat_stn_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    // feat.stn.conv2
    int feat_stn_conv2_in_channls =64;
    int feat_stn_conv2_out_channls=128;
    half* Dfeat_stn_conv2_weight;
    half* Dfeat_stn_conv2_bias;
    half* Dfeat_stn_conv2_output=gloabl_mem3;
    // CHECK(cudaMalloc((void**)&Dfeat_stn_conv2_output, batch_size*feat_stn_conv2_out_channls*n_points*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv2_weight, feat_stn_conv2_in_channls*feat_stn_conv2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv2_bias, feat_stn_conv2_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_stn_conv2_weight, params["feat.stn.conv2.weight"].data(), feat_stn_conv2_in_channls*feat_stn_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_conv2_bias, params["feat.stn.conv2.bias"].data(), feat_stn_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    // feat.stn.bn2
    half* Dfeat_stn_bn2_mean;
    half* Dfeat_stn_bn2_var;
    half* Dfeat_stn_bn2_gamma;
    half* Dfeat_stn_bn2_beta;
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn2_mean, feat_stn_conv2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn2_var, feat_stn_conv2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn2_gamma, feat_stn_conv2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn2_beta, feat_stn_conv2_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_stn_bn2_mean, params["feat.stn.bn2.running_mean"].data(), feat_stn_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn2_var, params["feat.stn.bn2.running_var"].data(), feat_stn_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn2_gamma, params["feat.stn.bn2.weight"].data(), feat_stn_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn2_beta, params["feat.stn.bn2.bias"].data(), feat_stn_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //feat.stn.conv3
    int feat_stn_conv3_in_channls =128;
    int feat_stn_conv3_out_channls=1024;
    half* Dfeat_stn_conv3_weight;
    half* Dfeat_stn_conv3_bias;
    half* Dfeat_stn_conv3_output=gloabl_mem2;
    // CHECK(cudaMalloc((void**)&Dfeat_stn_conv3_output, batch_size*feat_stn_conv3_out_channls*n_points*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv3_weight, feat_stn_conv3_in_channls*feat_stn_conv3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv3_bias, feat_stn_conv3_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_stn_conv3_weight, params["feat.stn.conv3.weight"].data(), feat_stn_conv3_in_channls*feat_stn_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_conv3_bias, params["feat.stn.conv3.bias"].data(), feat_stn_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //feat.stn.bn3
    half* Dfeat_stn_bn3_mean;
    half* Dfeat_stn_bn3_var;
    half* Dfeat_stn_bn3_gamma;
    half* Dfeat_stn_bn3_beta;
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn3_mean, feat_stn_conv3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn3_var, feat_stn_conv3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn3_gamma, feat_stn_conv3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn3_beta, feat_stn_conv3_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_stn_bn3_mean, params["feat.stn.bn3.running_mean"].data(), feat_stn_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn3_var, params["feat.stn.bn3.running_var"].data(), feat_stn_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn3_gamma, params["feat.stn.bn3.weight"].data(), feat_stn_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn3_beta, params["feat.stn.bn3.bias"].data(), feat_stn_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //feat.stn.max
    half* Dfeat_stn_max_output=gloabl_mem3;
    // CHECK(cudaMalloc((void**)&Dfeat_stn_max_output, batch_size*feat_stn_conv3_out_channls*sizeof(half)));    

    //feat.stn.fc1
    int feat_stn_fc1_in_channls =1024;
    int feat_stn_fc1_out_channls=512;
    half* Dfeat_stn_fc1_weight;
    half* Dfeat_stn_fc1_bias;
    half* Dfeat_stn_fc1_output=gloabl_mem2;
    // CHECK(cudaMalloc((void**)&Dfeat_stn_fc1_output, batch_size*feat_stn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc1_weight, feat_stn_fc1_in_channls*feat_stn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc1_bias, feat_stn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_stn_fc1_weight, params["feat.stn.fc1.weight"].data(), feat_stn_fc1_in_channls*feat_stn_fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_fc1_bias, params["feat.stn.fc1.bias"].data(), feat_stn_fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //feat.stn.bn4
    half* Dfeat_stn_bn4_mean;
    half* Dfeat_stn_bn4_var;
    half* Dfeat_stn_bn4_gamma;
    half* Dfeat_stn_bn4_beta;
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn4_mean, feat_stn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn4_var, feat_stn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn4_gamma, feat_stn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn4_beta, feat_stn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_stn_bn4_mean, params["feat.stn.bn4.running_mean"].data(), feat_stn_fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn4_var, params["feat.stn.bn4.running_var"].data(), feat_stn_fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn4_gamma, params["feat.stn.bn4.weight"].data(), feat_stn_fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn4_beta, params["feat.stn.bn4.bias"].data(), feat_stn_fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))


    //feat.stn.fc2
    int feat_stn_fc2_in_channls =512;
    int feat_stn_fc2_out_channls=256;
    half* Dfeat_stn_fc2_weight;
    half* Dfeat_stn_fc2_bias;
    half* Dfeat_stn_fc2_output=gloabl_mem3;
    // CHECK(cudaMalloc((void**)&Dfeat_stn_fc2_output, batch_size*feat_stn_fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc2_weight, feat_stn_fc2_in_channls*feat_stn_fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc2_bias, feat_stn_fc2_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_stn_fc2_weight, params["feat.stn.fc2.weight"].data(), feat_stn_fc2_in_channls*feat_stn_fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_fc2_bias, params["feat.stn.fc2.bias"].data(), feat_stn_fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //feat.stn.bn5
    half* Dfeat_stn_bn5_mean;
    half* Dfeat_stn_bn5_var;
    half* Dfeat_stn_bn5_gamma;
    half* Dfeat_stn_bn5_beta;
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn5_mean, feat_stn_fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn5_var, feat_stn_fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn5_gamma, feat_stn_fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn5_beta, feat_stn_fc2_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_stn_bn5_mean, params["feat.stn.bn5.running_mean"].data(), feat_stn_fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn5_var, params["feat.stn.bn5.running_var"].data(), feat_stn_fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn5_gamma, params["feat.stn.bn5.weight"].data(), feat_stn_fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn5_beta, params["feat.stn.bn5.bias"].data(), feat_stn_fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //feat.stn.fc3
    int feat_stn_fc3_in_channls =256;
    int feat_stn_fc3_out_channls=9;
    half* Dfeat_stn_fc3_weight;
    half* Dfeat_stn_fc3_bias;
    half* Dfeat_stn_fc3_output=gloabl_mem2;
    // CHECK(cudaMalloc((void**)&Dfeat_stn_fc3_output, batch_size*feat_stn_fc3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc3_weight, feat_stn_fc3_in_channls*feat_stn_fc3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc3_bias, feat_stn_fc3_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_stn_fc3_weight, params["feat.stn.fc3.weight"].data(), feat_stn_fc3_in_channls*feat_stn_fc3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_fc3_bias, params["feat.stn.fc3.bias"].data(), feat_stn_fc3_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //feat.bmm.trans
    half* Dfeat_bmm_trans_output=gloabl_mem3;
    // CHECK(cudaMalloc((void**)&Dfeat_bmm_trans_output, batch_size*n_points*3*sizeof(half)));

    //feat.conv1
    int feat_conv1_in_channls =3;
    int feat_conv1_out_channls=64;
    half* Dfeat_conv1_weight;
    half* Dfeat_conv1_bias;
    half* Dfeat_conv1_output=gloabl_mem2;
    // CHECK(cudaMalloc((void**)&Dfeat_conv1_output, batch_size*feat_conv1_out_channls*n_points*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_conv1_weight, feat_conv1_in_channls*feat_conv1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_conv1_bias, feat_conv1_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_conv1_weight, params["feat.conv1.weight"].data(), feat_conv1_in_channls*feat_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_conv1_bias, params["feat.conv1.bias"].data(), feat_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //feat.bn1
    half* Dfeat_bn1_mean;
    half* Dfeat_bn1_var;
    half* Dfeat_bn1_gamma;
    half* Dfeat_bn1_beta;
    CHECK(cudaMalloc((void**)&Dfeat_bn1_mean, feat_conv1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_bn1_var, feat_conv1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_bn1_gamma, feat_conv1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_bn1_beta, feat_conv1_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_bn1_mean, params["feat.bn1.running_mean"].data(), feat_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn1_var, params["feat.bn1.running_var"].data(), feat_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn1_gamma, params["feat.bn1.weight"].data(), feat_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn1_beta, params["feat.bn1.bias"].data(), feat_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //feat.fstn.conv1
    int feat_fstn_conv1_in_channls =64;
    int feat_fstn_conv1_out_channls=64;
    half* Dfeat_fstn_conv1_weight;
    half* Dfeat_fstn_conv1_bias;
    half* Dfeat_fstn_conv1_output=gloabl_mem1;
    // CHECK(cudaMalloc((void**)&Dfeat_fstn_conv1_output, batch_size*feat_fstn_conv1_out_channls*n_points*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv1_weight, feat_fstn_conv1_in_channls*feat_fstn_conv1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv1_bias, feat_fstn_conv1_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_fstn_conv1_weight, params["feat.fstn.conv1.weight"].data(), feat_fstn_conv1_in_channls*feat_fstn_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_conv1_bias, params["feat.fstn.conv1.bias"].data(), feat_fstn_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //feat.fstn.bn1
    half* Dfeat_fstn_bn1_mean;
    half* Dfeat_fstn_bn1_var;
    half* Dfeat_fstn_bn1_gamma;
    half* Dfeat_fstn_bn1_beta;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn1_mean, feat_fstn_conv1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn1_var, feat_fstn_conv1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn1_gamma, feat_fstn_conv1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn1_beta, feat_fstn_conv1_out_channls*sizeof(half)));
    // Upload the per-channel BatchNorm params for feat.fstn.bn1 (running mean/var
    // plus the affine weight/bias, one value per output channel).
    // NOTE: CHECK(...) expands to a braced block, so the missing ';' after these
    // macro invocations still leaves each expansion a complete statement.
    CHECK(cudaMemcpy(Dfeat_fstn_bn1_mean,  params["feat.fstn.bn1.running_mean"].data(), feat_fstn_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn1_var,   params["feat.fstn.bn1.running_var"].data(), feat_fstn_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn1_gamma, params["feat.fstn.bn1.weight"].data(), feat_fstn_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn1_beta,  params["feat.fstn.bn1.bias"].data(), feat_fstn_conv1_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //feat.fstn.conv2: 64 -> 128 channels. The weight buffer is in*out halfs,
    // which assumes a pointwise (kernel_size==1) conv -- TODO confirm against
    // the conv_bn_relu kernel's weight layout.
    int feat_fstn_conv2_in_channls =64;
    int feat_fstn_conv2_out_channls=128;
    half* Dfeat_fstn_conv2_weight;
    half* Dfeat_fstn_conv2_bias;
    // Layer outputs alias the preallocated scratch buffers (gloabl_mem1/gloabl_mem3
    // ping-pong between consecutive layers) instead of per-layer cudaMalloc.
    half* Dfeat_fstn_conv2_output=gloabl_mem3;
    // CHECK(cudaMalloc((void**)&Dfeat_fstn_conv2_output, batch_size*feat_fstn_conv2_out_channls*n_points*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv2_weight, feat_fstn_conv2_in_channls*feat_fstn_conv2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv2_bias, feat_fstn_conv2_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_fstn_conv2_weight, params["feat.fstn.conv2.weight"].data(), feat_fstn_conv2_in_channls*feat_fstn_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_conv2_bias, params["feat.fstn.conv2.bias"].data(), feat_fstn_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //feat.fstn.bn2: per-channel BN statistics + affine params for conv2's output.
    half* Dfeat_fstn_bn2_mean;
    half* Dfeat_fstn_bn2_var;
    half* Dfeat_fstn_bn2_gamma;
    half* Dfeat_fstn_bn2_beta;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn2_mean, feat_fstn_conv2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn2_var, feat_fstn_conv2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn2_gamma, feat_fstn_conv2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn2_beta, feat_fstn_conv2_out_channls*sizeof(half))); 
    CHECK(cudaMemcpy(Dfeat_fstn_bn2_mean, params["feat.fstn.bn2.running_mean"].data(), feat_fstn_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn2_var, params["feat.fstn.bn2.running_var"].data(), feat_fstn_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn2_gamma, params["feat.fstn.bn2.weight"].data(), feat_fstn_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn2_beta, params["feat.fstn.bn2.bias"].data(), feat_fstn_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //feat.fstn.conv3: 128 -> 1024 channels, same pointwise-conv weight layout.
    int feat_fstn_conv3_in_channls =128;
    int feat_fstn_conv3_out_channls=1024;
    half* Dfeat_fstn_conv3_weight;
    half* Dfeat_fstn_conv3_bias;
    half* Dfeat_fstn_conv3_output=gloabl_mem1;   // reuse scratch buffer 1
    // CHECK(cudaMalloc((void**)&Dfeat_fstn_conv3_output, batch_size*feat_fstn_conv3_out_channls*n_points*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv3_weight, feat_fstn_conv3_in_channls*feat_fstn_conv3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv3_bias, feat_fstn_conv3_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_fstn_conv3_weight, params["feat.fstn.conv3.weight"].data(), feat_fstn_conv3_in_channls*feat_fstn_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_conv3_bias, params["feat.fstn.conv3.bias"].data(), feat_fstn_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //feat.fstn.bn3: per-channel BN parameters for conv3's 1024-channel output.
    half* Dfeat_fstn_bn3_mean;
    half* Dfeat_fstn_bn3_var;
    half* Dfeat_fstn_bn3_gamma;
    half* Dfeat_fstn_bn3_beta;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn3_mean, feat_fstn_conv3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn3_var, feat_fstn_conv3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn3_gamma, feat_fstn_conv3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn3_beta, feat_fstn_conv3_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_fstn_bn3_mean, params["feat.fstn.bn3.running_mean"].data(), feat_fstn_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn3_var, params["feat.fstn.bn3.running_var"].data(), feat_fstn_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn3_gamma, params["feat.fstn.bn3.weight"].data(), feat_fstn_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn3_beta, params["feat.fstn.bn3.bias"].data(), feat_fstn_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //feat.fstn.max: per-batch channel-wise max pooling output (1024 values per
    // sample); reuses scratch buffer 3, no dedicated allocation.
    half* Dfeat_fstn_max_output=gloabl_mem3;
    // CHECK(cudaMalloc((void**)&Dfeat_fstn_max_output, batch_size*feat_fstn_conv3_out_channls*sizeof(half)));  

    //feat.fstn.fc1: fully-connected 1024 -> 512; weight buffer is in*out halfs.
    int feat_fstn_fc1_in_channls =1024;
    int feat_fstn_fc1_out_channls=512;
    half* Dfeat_fstn_fc1_weight;
    half* Dfeat_fstn_fc1_bias;
    half* Dfeat_fstn_fc1_output=gloabl_mem1;   // reuse scratch buffer 1
    // CHECK(cudaMalloc((void**)&Dfeat_fstn_fc1_output, batch_size*feat_fstn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc1_weight, feat_fstn_fc1_in_channls*feat_fstn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc1_bias, feat_fstn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_fstn_fc1_weight, params["feat.fstn.fc1.weight"].data(), feat_fstn_fc1_in_channls*feat_fstn_fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_fc1_bias, params["feat.fstn.fc1.bias"].data(), feat_fstn_fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //feat.fstn.bn4: per-channel BN parameters for fc1's 512-wide output.
    half* Dfeat_fstn_bn4_mean;
    half* Dfeat_fstn_bn4_var;
    half* Dfeat_fstn_bn4_gamma;
    half* Dfeat_fstn_bn4_beta;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn4_mean, feat_fstn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn4_var, feat_fstn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn4_gamma, feat_fstn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn4_beta, feat_fstn_fc1_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_fstn_bn4_mean, params["feat.fstn.bn4.running_mean"].data(), feat_fstn_fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn4_var, params["feat.fstn.bn4.running_var"].data(), feat_fstn_fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn4_gamma, params["feat.fstn.bn4.weight"].data(), feat_fstn_fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn4_beta, params["feat.fstn.bn4.bias"].data(), feat_fstn_fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    
    //feat.fstn.fc2: fully-connected 512 -> 256.
    int feat_fstn_fc2_in_channls =512;
    int feat_fstn_fc2_out_channls=256;
    half* Dfeat_fstn_fc2_weight;
    half* Dfeat_fstn_fc2_bias;
    half* Dfeat_fstn_fc2_output=gloabl_mem3;   // reuse scratch buffer 3
    // CHECK(cudaMalloc((void**)&Dfeat_fstn_fc2_output, batch_size*feat_fstn_fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc2_weight, feat_fstn_fc2_in_channls*feat_fstn_fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc2_bias, feat_fstn_fc2_out_channls*sizeof(half)));  
    CHECK(cudaMemcpy(Dfeat_fstn_fc2_weight, params["feat.fstn.fc2.weight"].data(), feat_fstn_fc2_in_channls*feat_fstn_fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_fc2_bias, params["feat.fstn.fc2.bias"].data(), feat_fstn_fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //feat.fstn.bn5: per-channel BN parameters for fc2's 256-wide output.
    half* Dfeat_fstn_bn5_mean;
    half* Dfeat_fstn_bn5_var;
    half* Dfeat_fstn_bn5_gamma;
    half* Dfeat_fstn_bn5_beta;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn5_mean, feat_fstn_fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn5_var, feat_fstn_fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn5_gamma, feat_fstn_fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn5_beta, feat_fstn_fc2_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_fstn_bn5_mean, params["feat.fstn.bn5.running_mean"].data(), feat_fstn_fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn5_var, params["feat.fstn.bn5.running_var"].data(), feat_fstn_fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn5_gamma, params["feat.fstn.bn5.weight"].data(), feat_fstn_fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn5_beta, params["feat.fstn.bn5.bias"].data(), feat_fstn_fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    
    //feat.fstn.fc3: fully-connected 256 -> 64*64; the output is the flattened
    // 64x64 feature-transform matrix produced by the feature STN.
    int feat_fstn_fc3_in_channls =256;
    int feat_fstn_fc3_out_channls=64*64;
    half* Dfeat_fstn_fc3_weight;
    half* Dfeat_fstn_fc3_bias;
    half* Dfeat_fstn_fc3_output=gloabl_mem1;   // reuse scratch buffer 1
    // CHECK(cudaMalloc((void**)&Dfeat_fstn_fc3_output, batch_size*feat_fstn_fc3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc3_weight, feat_fstn_fc3_in_channls*feat_fstn_fc3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc3_bias, feat_fstn_fc3_out_channls*sizeof(half)));  
    CHECK(cudaMemcpy(Dfeat_fstn_fc3_weight, params["feat.fstn.fc3.weight"].data(), feat_fstn_fc3_in_channls*feat_fstn_fc3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_fc3_bias, params["feat.fstn.fc3.bias"].data(), feat_fstn_fc3_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //feat.bmm1.trans.feat: scratch for applying the 64x64 feature transform
    // (batched matmul) to the per-point features; reuses scratch buffer 3.
    half* Dfeat_bmm_trans_feat_output=gloabl_mem3;
    // CHECK(cudaMalloc((void**)&Dfeat_bmm_trans_feat_output, batch_size*n_points*64*sizeof(half)));

    //feat.conv2: 64 -> 128 channel pointwise conv of the main branch.
    int feat_conv2_in_channls =64;
    int feat_conv2_out_channls=128;
    half* Dfeat_conv2_weight;
    half* Dfeat_conv2_bias;
    half* Dfeat_conv2_output=gloabl_mem1;   // reuse scratch buffer 1
    // CHECK(cudaMalloc((void**)&Dfeat_conv2_output, batch_size*feat_conv2_out_channls*n_points*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_conv2_weight, feat_conv2_in_channls*feat_conv2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_conv2_bias, feat_conv2_out_channls*sizeof(half)));  
    CHECK(cudaMemcpy(Dfeat_conv2_weight, params["feat.conv2.weight"].data(), feat_conv2_in_channls*feat_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_conv2_bias, params["feat.conv2.bias"].data(), feat_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //feat.bn2: per-channel BN parameters for feat.conv2's output.
    half* Dfeat_bn2_mean;
    half* Dfeat_bn2_var;
    half* Dfeat_bn2_gamma;
    half* Dfeat_bn2_beta;
    CHECK(cudaMalloc((void**)&Dfeat_bn2_mean, feat_conv2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_bn2_var, feat_conv2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_bn2_gamma, feat_conv2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_bn2_beta, feat_conv2_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_bn2_mean, params["feat.bn2.running_mean"].data(), feat_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn2_var, params["feat.bn2.running_var"].data(), feat_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn2_gamma, params["feat.bn2.weight"].data(), feat_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn2_beta, params["feat.bn2.bias"].data(), feat_conv2_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //feat.conv3: 128 -> 1024 channel pointwise conv (final per-point features).
    int feat_conv3_in_channls =128;
    int feat_conv3_out_channls=1024;
    half* Dfeat_conv3_weight;
    half* Dfeat_conv3_bias;
    half* Dfeat_conv3_output=gloabl_mem3;   // reuse scratch buffer 3
    // CHECK(cudaMalloc((void**)&Dfeat_conv3_output, batch_size*feat_conv3_out_channls*n_points*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_conv3_weight, feat_conv3_in_channls*feat_conv3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_conv3_bias, feat_conv3_out_channls*sizeof(half)));  
    CHECK(cudaMemcpy(Dfeat_conv3_weight, params["feat.conv3.weight"].data(), feat_conv3_in_channls*feat_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_conv3_bias, params["feat.conv3.bias"].data(), feat_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //feat.bn3: per-channel BN parameters for feat.conv3's output.
    half* Dfeat_bn3_mean;
    half* Dfeat_bn3_var;
    half* Dfeat_bn3_gamma;
    half* Dfeat_bn3_beta;
    CHECK(cudaMalloc((void**)&Dfeat_bn3_mean, feat_conv3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_bn3_var, feat_conv3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_bn3_gamma, feat_conv3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfeat_bn3_beta, feat_conv3_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dfeat_bn3_mean, params["feat.bn3.running_mean"].data(), feat_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn3_var, params["feat.bn3.running_var"].data(), feat_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn3_gamma, params["feat.bn3.weight"].data(), feat_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn3_beta, params["feat.bn3.bias"].data(), feat_conv3_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //feat.max: global channel-wise max pooling -> 1024-d descriptor per sample;
    // reuses scratch buffer 1, no dedicated allocation.
    half* Dfeat_max_output=gloabl_mem1;
    // CHECK(cudaMalloc((void**)&Dfeat_max_output, batch_size*feat_conv3_out_channls*sizeof(half)));

    //fc1: classifier head, fully-connected 1024 -> 512.
    int fc1_in_channls =1024;
    int fc1_out_channls=512;
    half* Dfc1_weight;
    half* Dfc1_bias;
    half* Dfc1_output=gloabl_mem3;   // reuse scratch buffer 3
    // CHECK(cudaMalloc((void**)&Dfc1_output, batch_size*fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfc1_weight, fc1_in_channls*fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfc1_bias, fc1_out_channls*sizeof(half)));  
    CHECK(cudaMemcpy(Dfc1_weight, params["fc1.weight"].data(), fc1_in_channls*fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfc1_bias, params["fc1.bias"].data(), fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //bn1: per-channel BN parameters for fc1's output.
    half* Dbn1_mean;
    half* Dbn1_var;
    half* Dbn1_gamma;
    half* Dbn1_beta;
    CHECK(cudaMalloc((void**)&Dbn1_mean, fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dbn1_var, fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dbn1_gamma, fc1_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dbn1_beta, fc1_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dbn1_mean, params["bn1.running_mean"].data(), fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dbn1_var, params["bn1.running_var"].data(), fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dbn1_gamma, params["bn1.weight"].data(), fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dbn1_beta, params["bn1.bias"].data(), fc1_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //fc2: fully-connected 512 -> 256.
    int fc2_in_channls =512;
    int fc2_out_channls=256;
    half* Dfc2_weight;
    half* Dfc2_bias;
    half* Dfc2_output=gloabl_mem1;   // reuse scratch buffer 1
    // CHECK(cudaMalloc((void**)&Dfc2_output, batch_size*fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfc2_weight, fc2_in_channls*fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfc2_bias, fc2_out_channls*sizeof(half)));  
    CHECK(cudaMemcpy(Dfc2_weight, params["fc2.weight"].data(), fc2_in_channls*fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfc2_bias, params["fc2.bias"].data(), fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    //bn2: per-channel BN parameters for fc2's output.
    half* Dbn2_mean;
    half* Dbn2_var;
    half* Dbn2_gamma;
    half* Dbn2_beta;
    CHECK(cudaMalloc((void**)&Dbn2_mean, fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dbn2_var, fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dbn2_gamma, fc2_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dbn2_beta, fc2_out_channls*sizeof(half)));
    CHECK(cudaMemcpy(Dbn2_mean, params["bn2.running_mean"].data(), fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dbn2_var, params["bn2.running_var"].data(), fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dbn2_gamma, params["bn2.weight"].data(), fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dbn2_beta, params["bn2.bias"].data(), fc2_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //fc3: final fully-connected 256 -> 10 class logits (10-way classification).
    int fc3_in_channls =256;
    int fc3_out_channls=10;
    half* Dfc3_weight;
    half* Dfc3_bias;
    half* Dfc3_output=gloabl_mem3;   // reuse scratch buffer 3
    // CHECK(cudaMalloc((void**)&Dfc3_output, batch_size*fc3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfc3_weight, fc3_in_channls*fc3_out_channls*sizeof(half)));
    CHECK(cudaMalloc((void**)&Dfc3_bias, fc3_out_channls*sizeof(half)));  
    CHECK(cudaMemcpy(Dfc3_weight, params["fc3.weight"].data(), fc3_in_channls*fc3_out_channls*sizeof(half), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfc3_bias, params["fc3.bias"].data(), fc3_out_channls*sizeof(half), cudaMemcpyHostToDevice))

    //todo kernel
    // Launch-configuration convention used throughout:
    //   conv layers : block = one thread per output channel, grid = (n_points, batch_size)
    //   max pooling : block = one thread per channel,        grid = (batch_size)
    //   fc layers   : block = one thread per output neuron,  grid = (batch_size)
    // Several blocks are sized 1024 threads -- the per-block hardware maximum on
    // current GPUs, so out_channls must never exceed 1024 with this scheme.
    dim3 feat_stn_conv1_block_size(feat_stn_conv1_out_channls);
    dim3 feat_stn_conv1_grid_size(n_points,batch_size);
    dim3 feat_stn_conv2_block_size(feat_stn_conv2_out_channls);
    dim3 feat_stn_conv2_grid_size(n_points,batch_size);
    dim3 feat_stn_conv3_block_size(feat_stn_conv3_out_channls);
    dim3 feat_stn_conv3_grid_size(n_points,batch_size);
    dim3 feat_stn_max_block_size(feat_stn_conv3_out_channls);
    dim3 feat_stn_max_grid_size(batch_size);
    dim3 feat_stn_fc1_block_size(feat_stn_fc1_out_channls);
    dim3 feat_stn_fc1_grid_size(batch_size);
    dim3 feat_stn_fc2_block_size(feat_stn_fc2_out_channls);
    dim3 feat_stn_fc2_grid_size(batch_size);
    dim3 feat_stn_fc3_block_size(feat_stn_fc3_out_channls);
    dim3 feat_stn_fc3_grid_size(batch_size);
    // bmm of raw xyz points with the 3x3 input transform: 3 threads per point.
    dim3 feat_bmm_trans_block_size(3);
    dim3 feat_bmm_trans_grid_size(n_points,batch_size);
    dim3 feat_conv1_block_size(feat_conv1_out_channls);
    dim3 feat_conv1_grid_size(n_points,batch_size);
    dim3 feat_fstn_conv1_block_size(feat_fstn_conv1_out_channls);
    dim3 feat_fstn_conv1_grid_size(n_points,batch_size);
    dim3 feat_fstn_conv2_block_size(feat_fstn_conv2_out_channls);
    dim3 feat_fstn_conv2_grid_size(n_points,batch_size);
    dim3 feat_fstn_conv3_block_size(feat_fstn_conv3_out_channls);
    dim3 feat_fstn_conv3_grid_size(n_points,batch_size);
    dim3 feat_fstn_max_block_size(feat_fstn_conv3_out_channls);
    dim3 feat_fstn_max_grid_size(batch_size);
    dim3 feat_fstn_fc1_block_size(feat_fstn_fc1_out_channls);
    dim3 feat_fstn_fc1_grid_size(batch_size);
    dim3 feat_fstn_fc2_block_size(feat_fstn_fc2_out_channls);
    dim3 feat_fstn_fc2_grid_size(batch_size);
    // fc3 emits 64*64=4096 outputs per sample: 4 blocks of 1024 threads each.
    dim3 feat_fstn_fc3_block_size(1024);
    dim3 feat_fstn_fc3_grid_size(4,batch_size);
    // bmm of 64-channel features with the 64x64 transform: 64 threads per point.
    dim3 feat_bmm_trans_feat_block_size(64);
    dim3 feat_bmm_trans_feat_grid_size(n_points,batch_size);
    dim3 feat_conv2_block_size(feat_conv2_out_channls);
    dim3 feat_conv2_grid_size(n_points,batch_size);
    dim3 feat_conv3_block_size(feat_conv3_out_channls);
    dim3 feat_conv3_grid_size(n_points,batch_size);
    dim3 feat_max_block_size(feat_conv3_out_channls);
    dim3 feat_max_grid_size( batch_size);
    dim3 feat_fc1_block_size(fc1_out_channls);
    dim3 feat_fc1_grid_size(batch_size);
    dim3 feat_fc2_block_size(fc2_out_channls);
    dim3 feat_fc2_grid_size(batch_size);
    dim3 feat_fc3_block_size(fc3_out_channls);
    dim3 feat_fc3_grid_size(batch_size);
    // Inference loop: for each pre-loaded batch, run the full forward pass
    // (input STN -> conv1 -> feature STN -> conv2/conv3 -> max pool -> fc head),
    // copy the 10 log-softmax scores per sample back to the host, and count
    // correct argmax predictions against label_batch.
    int count_true=0;
    auto start = std::chrono::high_resolution_clock::now();
    for(int i=0;i<(int)list_batch.size();i++){   // cast avoids signed/unsigned compare
        //todo copy data
        // Re-point the network input at batch i inside the point buffer.
        // raw_points is presumably device-resident (the pointer is fed straight
        // to kernels) -- TODO confirm. The layer-output aliases captured the
        // PRE-loop value of gloabl_mem1, so they are unaffected by this rebind.
        gloabl_mem1=&raw_points[i*(batch_size*n_points*3)];

        //todo run kernel
        conv_bn_relu<<<feat_stn_conv1_grid_size, feat_stn_conv1_block_size>>>(\
                    gloabl_mem1, Dfeat_stn_conv1_output,\
                    Dfeat_stn_conv1_weight, Dfeat_stn_conv1_bias, feat_stn_conv1_in_channls, \
                    Dfeat_stn_bn1_mean, Dfeat_stn_bn1_var, Dfeat_stn_bn1_gamma, Dfeat_stn_bn1_beta);
        conv_bn_relu<<<feat_stn_conv2_grid_size, feat_stn_conv2_block_size>>>(\
                    Dfeat_stn_conv1_output, Dfeat_stn_conv2_output,\
                    Dfeat_stn_conv2_weight, Dfeat_stn_conv2_bias, feat_stn_conv2_in_channls, \
                    Dfeat_stn_bn2_mean, Dfeat_stn_bn2_var, Dfeat_stn_bn2_gamma, Dfeat_stn_bn2_beta);
        conv_bn_relu<<<feat_stn_conv3_grid_size, feat_stn_conv3_block_size>>>(\
                    Dfeat_stn_conv2_output, Dfeat_stn_conv3_output,\
                    Dfeat_stn_conv3_weight, Dfeat_stn_conv3_bias, feat_stn_conv3_in_channls, \
                    Dfeat_stn_bn3_mean, Dfeat_stn_bn3_var, Dfeat_stn_bn3_gamma, Dfeat_stn_bn3_beta);
        my_max<<<feat_stn_max_grid_size, feat_stn_max_block_size>>>(Dfeat_stn_conv3_output, Dfeat_stn_max_output,n_points);
        fc_bn_relu<<<feat_stn_fc1_grid_size, feat_stn_fc1_block_size>>>(\
                    Dfeat_stn_max_output, Dfeat_stn_fc1_output,\
                    Dfeat_stn_fc1_weight, Dfeat_stn_fc1_bias, feat_stn_fc1_in_channls,feat_stn_fc1_out_channls,\
                    Dfeat_stn_bn4_mean, Dfeat_stn_bn4_var, Dfeat_stn_bn4_gamma, Dfeat_stn_bn4_beta);
        fc_bn_relu<<<feat_stn_fc2_grid_size, feat_stn_fc2_block_size>>>(\
                    Dfeat_stn_fc1_output, Dfeat_stn_fc2_output,\
                    Dfeat_stn_fc2_weight, Dfeat_stn_fc2_bias, feat_stn_fc2_in_channls,feat_stn_fc2_out_channls,\
                    Dfeat_stn_bn5_mean, Dfeat_stn_bn5_var, Dfeat_stn_bn5_gamma, Dfeat_stn_bn5_beta);
        fc_eye<<<feat_stn_fc3_grid_size, feat_stn_fc3_block_size>>>(\
                    Dfeat_stn_fc2_output, Dfeat_stn_fc3_output,\
                    Dfeat_stn_fc3_weight, Dfeat_stn_fc3_bias, feat_stn_fc3_in_channls,feat_stn_fc3_out_channls);
        // Apply the 3x3 input transform to the raw points.
        feat_bmm<<<feat_bmm_trans_grid_size, feat_bmm_trans_block_size>>>(gloabl_mem1, Dfeat_bmm_trans_output, Dfeat_stn_fc3_output);
        conv_bn_relu<<<feat_conv1_grid_size, feat_conv1_block_size>>>(\
                    Dfeat_bmm_trans_output, Dfeat_conv1_output,\
                    Dfeat_conv1_weight, Dfeat_conv1_bias, feat_conv1_in_channls,\
                    Dfeat_bn1_mean, Dfeat_bn1_var, Dfeat_bn1_gamma, Dfeat_bn1_beta);
        conv_bn_relu<<<feat_fstn_conv1_grid_size, feat_fstn_conv1_block_size>>>(\
                    Dfeat_conv1_output, Dfeat_fstn_conv1_output,\
                    Dfeat_fstn_conv1_weight, Dfeat_fstn_conv1_bias, feat_fstn_conv1_in_channls,\
                    Dfeat_fstn_bn1_mean, Dfeat_fstn_bn1_var, Dfeat_fstn_bn1_gamma, Dfeat_fstn_bn1_beta);
        conv_bn_relu<<<feat_fstn_conv2_grid_size, feat_fstn_conv2_block_size>>>(\
                    Dfeat_fstn_conv1_output, Dfeat_fstn_conv2_output,\
                    Dfeat_fstn_conv2_weight, Dfeat_fstn_conv2_bias, feat_fstn_conv2_in_channls,\
                    Dfeat_fstn_bn2_mean, Dfeat_fstn_bn2_var, Dfeat_fstn_bn2_gamma, Dfeat_fstn_bn2_beta);
        conv_bn_relu<<<feat_fstn_conv3_grid_size, feat_fstn_conv3_block_size>>>(\
                    Dfeat_fstn_conv2_output, Dfeat_fstn_conv3_output,\
                    Dfeat_fstn_conv3_weight, Dfeat_fstn_conv3_bias, feat_fstn_conv3_in_channls,\
                    Dfeat_fstn_bn3_mean, Dfeat_fstn_bn3_var, Dfeat_fstn_bn3_gamma, Dfeat_fstn_bn3_beta);
        my_max<<<feat_fstn_max_grid_size, feat_fstn_max_block_size>>>(\
                    Dfeat_fstn_conv3_output, Dfeat_fstn_max_output,n_points);
        fc_bn_relu<<<feat_fstn_fc1_grid_size, feat_fstn_fc1_block_size>>>(\
                    Dfeat_fstn_max_output, Dfeat_fstn_fc1_output,\
                    Dfeat_fstn_fc1_weight, Dfeat_fstn_fc1_bias, feat_fstn_fc1_in_channls,feat_fstn_fc1_out_channls,\
                    Dfeat_fstn_bn4_mean, Dfeat_fstn_bn4_var, Dfeat_fstn_bn4_gamma, Dfeat_fstn_bn4_beta);
        fc_bn_relu<<<feat_fstn_fc2_grid_size, feat_fstn_fc2_block_size>>>(\
                    Dfeat_fstn_fc1_output, Dfeat_fstn_fc2_output,\
                    Dfeat_fstn_fc2_weight, Dfeat_fstn_fc2_bias, feat_fstn_fc2_in_channls,feat_fstn_fc2_out_channls,\
                    Dfeat_fstn_bn5_mean, Dfeat_fstn_bn5_var, Dfeat_fstn_bn5_gamma, Dfeat_fstn_bn5_beta);
        feat_fstn_fc3<<<feat_fstn_fc3_grid_size, feat_fstn_fc3_block_size>>>(\
                    Dfeat_fstn_fc2_output, Dfeat_fstn_fc3_output,\
                    Dfeat_fstn_fc3_weight, Dfeat_fstn_fc3_bias, feat_fstn_fc3_in_channls);//trans_feat
        // Apply the 64x64 feature transform to the conv1 features.
        feat_bmm<<<feat_bmm_trans_feat_grid_size, feat_bmm_trans_feat_block_size>>>(Dfeat_conv1_output, \
                    Dfeat_bmm_trans_feat_output, Dfeat_fstn_fc3_output);
        conv_bn_relu<<<feat_conv2_grid_size, feat_conv2_block_size>>>(\
                    Dfeat_bmm_trans_feat_output, Dfeat_conv2_output,\
                    Dfeat_conv2_weight, Dfeat_conv2_bias, feat_conv2_in_channls,\
                    Dfeat_bn2_mean, Dfeat_bn2_var, Dfeat_bn2_gamma, Dfeat_bn2_beta);
        conv_bn<<<feat_conv3_grid_size, feat_conv3_block_size>>>(\
                    Dfeat_conv2_output, Dfeat_conv3_output,\
                    Dfeat_conv3_weight, Dfeat_conv3_bias, feat_conv3_in_channls,\
                    Dfeat_bn3_mean, Dfeat_bn3_var, Dfeat_bn3_gamma, Dfeat_bn3_beta);
        my_max<<<feat_max_grid_size, feat_max_block_size>>>(\
                    Dfeat_conv3_output, Dfeat_max_output,n_points);
        fc_bn_relu<<<feat_fc1_grid_size, feat_fc1_block_size>>>(\
                    Dfeat_max_output, Dfc1_output,\
                    Dfc1_weight, Dfc1_bias, fc1_in_channls,fc1_out_channls,\
                    Dbn1_mean, Dbn1_var, Dbn1_gamma, Dbn1_beta);
        fc_bn_relu<<<feat_fc2_grid_size, feat_fc2_block_size>>>(\
                    Dfc1_output, Dfc2_output,\
                    Dfc2_weight, Dfc2_bias, fc2_in_channls,fc2_out_channls,\
                    Dbn2_mean, Dbn2_var, Dbn2_gamma, Dbn2_beta);
        fc_softmax_log<<<feat_fc3_grid_size, feat_fc3_block_size>>>(\
                    Dfc2_output, Dfc3_output,\
                    Dfc3_weight, Dfc3_bias, fc3_in_channls,fc3_out_channls);
        // Surface any launch-configuration error from the sequence above;
        // without this, a bad launch would be silently ignored.
        CHECK(cudaGetLastError());

        // Defensive clear of the host result buffer (fully overwritten below).
        memset(result_out_gpu, 0, batch_size*10*sizeof(half));
        // Blocking D2H copy on the default stream also waits for all kernels
        // of this iteration, so no explicit synchronize is needed here.
        CHECK(cudaMemcpy(result_out_gpu, Dfc3_output, batch_size*10*sizeof(half), cudaMemcpyDeviceToHost))
        printf("batch %d[",i);
        for(int b=0;b<batch_size;b++){
            if(findMaxIdx(&result_out_gpu[b*10])==label_batch[i][b]){
                printf(GREEN " √ " RESET);
                count_true+=1;
            }
            else if(label_batch[i][b]!=-1)   // -1 marks padding slots in a partial batch
                printf(RED " ×" RESET);
        }
        printf("]\n");
    }
    // Checked sync (was unchecked): catches any trailing asynchronous execution
    // error before the timer stops and the program exits.
    CHECK(cudaDeviceSynchronize());
    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> diff = end - start;
    // NOTE(review): accuracy denominator is hard-coded to 1000 samples --
    // confirm it matches the dataset size (vs. batch_size*list_batch.size()).
    std::cout << std::fixed << std::setprecision(4) << diff.count() <<' '<<count_true/1000.0;
    return 0;
}