#include <iostream>
#include <vector>
#include <chrono>
#include <iomanip>
#include <string>
#include <fstream>
#include <map>
#include <dirent.h>
#include <cstring>
#include <hdf5/serial/H5Cpp.h>
#include <algorithm>
#include <numeric> 
#include <cmath>
#include <random>
#define RESET   "\033[0m"
#define RED     "\033[31m"
#define GREEN   "\033[32m"
// Abort the process with file/line context when a CUDA runtime call fails.
// NOTE: deliberately expands to a bare { } block (not do-while(0)) because
// call sites in this file invoke CHECK(...) with no trailing semicolon.
#define CHECK(call)\
{\
  const cudaError_t error=call;\
  if(error!=cudaSuccess)\
  {\
      printf(RED "ERROR: %s:%d," ,__FILE__,__LINE__);\
      printf("code:%d,reason:%s\n" RESET,error,cudaGetErrorString(error));\
      exit(1);\
  }\
}
// Query and print the name of CUDA device `devNum`, then make it current.
// Exits via CHECK on any CUDA runtime error.
void initDevice(int devNum)
{
  cudaDeviceProp prop;
  CHECK(cudaGetDeviceProperties(&prop, devNum));
  printf(GREEN "Using device %d: %s\n" RESET, devNum, prop.name);
  CHECK(cudaSetDevice(devNum));
}
// Return the names (not paths) of entries in `dir` whose names end with
// ".txt". On opendir failure, prints a perror message and returns an
// empty vector.
std::vector<std::string> get_files_in_directory(const std::string& dir) {
    std::vector<std::string> files;
    DIR* dp = opendir(dir.c_str());
    if (dp == NULL) {
        perror("opendir");
        return files;
    }
    const std::string ext = ".txt";
    struct dirent* entry;
    while ((entry = readdir(dp)) != NULL) {
        std::string filename = entry->d_name;
        // Bug fix: require ".txt" as a suffix. The previous
        // find() != npos test also matched names such as
        // "weights.txt.bak" or "a.txtx".
        if (filename.size() >= ext.size() &&
            filename.compare(filename.size() - ext.size(), ext.size(), ext) == 0) {
            files.push_back(filename);
        }
    }
    closedir(dp);
    return files;
}
// Read whitespace-separated floats from a text file into a vector.
// On open failure, prints an error to stderr and returns an empty vector.
std::vector<float> read_param(const std::string& filepath) {
    std::vector<float> data;
    std::ifstream in(filepath);
    if (!in.is_open()) {
        std::cerr << "Unable to open file: " << filepath << std::endl;
        return data;
    }
    for (float v; in >> v; ) {
        data.push_back(v);
    }
    return data;  // ifstream closed by destructor
}

// Load every ".txt" parameter file in `dir` into a map keyed by the file
// name with its extension stripped (e.g. "feat.conv1.weight").
std::map<std::string, std::vector<float>> read_params(std::string dir) {
    std::map<std::string, std::vector<float>> params;
    for (const std::string& fname : get_files_in_directory(dir)) {
        const std::string key = fname.substr(0, fname.find_last_of("."));
        params[key] = read_param(dir + "/" + fname);
    }
    return params;
}

using namespace H5;
// Load a test-set HDF5 file laid out as one group per sample: each group
// contains a 2-D float dataset "points" (flattened into one vector) and an
// integer attribute "label". One entry is appended to each output vector
// per group. HDF5 exceptions are caught and their stacks printed; on error
// the output vectors may be left partially filled.
void read_h5_file(const std::string& file_path, std::vector<std::vector<float>>& list_of_points, std::vector<int>& list_of_labels) {
    try {
        H5File file(file_path, H5F_ACC_RDONLY);
        // Collect the names of all top-level objects (sample groups).
        std::vector<std::string> dataset_names;
        hsize_t num_objs = file.getNumObjs();
        for (hsize_t i = 0; i < num_objs; i++) {
            dataset_names.push_back(file.getObjnameByIdx(i));
        }
        for (const auto& name : dataset_names) {
            // Read <group>/points as a flat float buffer of dims[0]*dims[1].
            DataSet dataset = file.openDataSet(name + "/points");
            DataSpace dataspace = dataset.getSpace();
            hsize_t dims[2];
            dataspace.getSimpleExtentDims(dims, NULL);
            std::vector<float> points(dims[0] * dims[1]);
            dataset.read(points.data(), PredType::NATIVE_FLOAT);
            list_of_points.push_back(points);
            // The class label is stored as an int attribute on the group.
            Attribute label_attr = file.openGroup(name).openAttribute("label");
            int label;
            label_attr.read(PredType::NATIVE_INT, &label);
            list_of_labels.push_back(label);
        }
    } catch (FileIException& error) {
        error.printErrorStack();
    } catch (DataSetIException& error) {
        error.printErrorStack();
    } catch (DataSpaceIException& error) {
        error.printErrorStack();
    } catch (DataTypeIException& error) {
        error.printErrorStack();
    }
}
//todo conv_bn_relu
// 1x1 conv + inference batchnorm + ReLU.
// Launch layout (from the indexing): blockDim.x = out channels,
// gridDim.x = points, blockIdx.y = batch index — TODO confirm at call site.
__global__ void conv_bn_relu(float* point_in, float* point_out, float* weight, float* bias,
                             int in_channls, float* mean, float* var, float* gamma, float* beta) {
    int out_idx = (threadIdx.x + blockIdx.x * blockDim.x) + blockIdx.y * blockDim.x * gridDim.x;
    // Accumulate in a register instead of read-modify-writing global memory
    // on every iteration (the original hit point_out[out_idx] in the loop).
    float acc = 0.0f;
    for (int i = 0; i < in_channls; i++) {
        int in_idx = (i + blockIdx.x * in_channls) + blockIdx.y * in_channls * gridDim.x;
        int weight_idx = i + threadIdx.x * in_channls;
        acc += point_in[in_idx] * weight[weight_idx];
    }
    acc += bias[threadIdx.x];
    // Float math: sqrtf and 1e-5f avoid the silent double promotion of sqrt/1e-5.
    acc = (acc - mean[threadIdx.x]) / sqrtf(var[threadIdx.x] + 1e-5f);
    acc = acc * gamma[threadIdx.x] + beta[threadIdx.x];
    point_out[out_idx] = fmaxf(acc, 0.0f);  // ReLU
}
//todo conv_bn
// 1x1 conv + inference batchnorm (no activation).
// Launch layout (from the indexing): blockDim.x = out channels,
// gridDim.x = points, blockIdx.y = batch index — TODO confirm at call site.
__global__ void conv_bn(float* point_in, float* point_out, float* weight, float* bias,
                        int in_channls, float* mean, float* var, float* gamma, float* beta) {
    int out_idx = (threadIdx.x + blockIdx.x * blockDim.x) + blockIdx.y * blockDim.x * gridDim.x;
    // Register accumulator instead of repeated global read-modify-write.
    float acc = 0.0f;
    for (int i = 0; i < in_channls; i++) {
        int in_idx = (i + blockIdx.x * in_channls) + blockIdx.y * in_channls * gridDim.x;
        int weight_idx = i + threadIdx.x * in_channls;
        acc += point_in[in_idx] * weight[weight_idx];
    }
    acc += bias[threadIdx.x];
    // Float math: sqrtf/1e-5f instead of double sqrt/1e-5.
    acc = (acc - mean[threadIdx.x]) / sqrtf(var[threadIdx.x] + 1e-5f);
    point_out[out_idx] = acc * gamma[threadIdx.x] + beta[threadIdx.x];
}
//todo my_max
// Per-channel max over num_point points (global max pooling).
// One output element per thread; assumes num_point >= 1.
__global__ void my_max(float* point_in, float* point_out, int num_point) {
    int out_idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Bug fix: seed with the most negative float (-FLT_MAX) rather than -100,
    // which returned -100 whenever every input value was below -100.
    float best = -3.402823466e38f;
    for (int i = 0; i < num_point; i++) {
        int in_idx = threadIdx.x + (i + blockIdx.x * num_point) * blockDim.x;
        // Track the running max in a register; write global memory once.
        best = fmaxf(best, point_in[in_idx]);
    }
    point_out[out_idx] = best;
}
//todo fc.bn.relu
// Fully-connected layer + inference batchnorm + ReLU.
// Launch layout (from the indexing): blockDim.x = out channels,
// blockIdx.x = batch index — TODO confirm at call site.
__global__ void fc_bn_relu(float* point_in, float* point_out,
                           float* weight, float* bias, int in_channls, int out_channls,
                           float* mean, float* var, float* gamma, float* beta) {
    int out_idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Register accumulator instead of repeated global read-modify-write.
    float acc = 0.0f;
    for (int i = 0; i < in_channls; i++) {
        int in_idx = i + blockIdx.x * in_channls;
        int weight_idx = i + threadIdx.x * in_channls;
        acc += point_in[in_idx] * weight[weight_idx];
    }
    acc += bias[threadIdx.x];
    // Float math: sqrtf/1e-5f instead of double sqrt/1e-5.
    acc = (acc - mean[threadIdx.x]) / sqrtf(var[threadIdx.x] + 1e-5f);
    acc = acc * gamma[threadIdx.x] + beta[threadIdx.x];
    point_out[out_idx] = fmaxf(acc, 0.0f);  // ReLU
}
//todo fc_eye
// Fully-connected layer whose output (a flattened 3x3 transform per batch)
// has the identity matrix added: within each batch's 9 elements, every 4th
// element (offset 0, 4, 8) is a diagonal entry.
__global__ void fc_eye(float* point_in, float* point_out,
                       float* weight, float* bias, int in_channls, int out_channls) {
    int out_idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Register accumulator instead of repeated global read-modify-write.
    float acc = 0.0f;
    for (int i = 0; i < in_channls; i++) {
        int in_idx = i + blockIdx.x * in_channls;
        int weight_idx = i + threadIdx.x * in_channls;
        acc += point_in[in_idx] * weight[weight_idx];
    }
    acc += bias[threadIdx.x];
    // Add 1 on the diagonal of the per-batch 3x3 matrix.
    point_out[out_idx] = ((out_idx - blockIdx.x * 3 * 3) % 4 == 0) ? acc + 1.0f : acc;
}
//todo feat.fstn.fc3
// Final FC of the feature STN: produces a flattened 64x64 transform per
// batch (blockIdx.y) with the identity added — within each batch's 64*64
// elements, every 65th element is a diagonal entry. The output channel is
// split across blockIdx.x * blockDim.x + threadIdx.x.
__global__ void feat_fstn_fc3(float* point_in, float* point_out,
                              float* weight, float* bias, int in_channls) {
    int chan = threadIdx.x + blockIdx.x * blockDim.x;
    int out_idx = chan + blockIdx.y * (blockDim.x * gridDim.x);
    // Register accumulator instead of repeated global read-modify-write.
    float acc = 0.0f;
    for (int i = 0; i < in_channls; i++) {
        int in_idx = i + blockIdx.y * in_channls;
        int weight_idx = i + chan * in_channls;
        acc += point_in[in_idx] * weight[weight_idx];
    }
    acc += bias[chan];
    // Add 1 on the diagonal of the per-batch 64x64 matrix.
    point_out[out_idx] = ((out_idx - blockIdx.y * 64 * 64) % 65 == 0) ? acc + 1.0f : acc;
}
//todo fc_softmax_log
// Final FC + numerically-stable log-softmax over the class scores.
// Expects a single block with blockDim.x == out_channls threads.
// Bug fixes vs. the original:
//  - point_out was accumulated with += without being zeroed first;
//  - shared max_val/sum_val were read and written uninitialized by all
//    threads with no reduction and no barrier, a data race.
__global__ void fc_softmax_log(float* point_in, float* point_out,
                               float* weight, float* bias, int in_channls, int out_channls) {
    int idx = threadIdx.x;
    // FC: accumulate in a register, then publish the raw logit.
    float acc = 0.0f;
    for (int i = 0; i < in_channls; i++) {
        acc += point_in[i] * weight[i + idx * in_channls];
    }
    acc += bias[idx];
    point_out[idx] = acc;
    __syncthreads();
    __shared__ float max_val, sum_val;
    // Serial reductions by thread 0 (blockDim.x is tiny, e.g. 10 classes).
    if (idx == 0) {
        max_val = point_out[0];
        for (int i = 1; i < blockDim.x; i++) {
            max_val = fmaxf(max_val, point_out[i]);
        }
    }
    __syncthreads();
    point_out[idx] = expf(point_out[idx] - max_val);
    __syncthreads();
    if (idx == 0) {
        sum_val = 0.0f;
        for (int i = 0; i < blockDim.x; i++) {
            sum_val += point_out[i];
        }
    }
    __syncthreads();
    point_out[idx] = logf(point_out[idx] / sum_val);
}
//todo fc
// Plain fully-connected layer (no activation).
// Launch layout (from the indexing): blockDim.x = out channels,
// blockIdx.x = batch index — TODO confirm at call site.
__global__ void fc(float* point_in, float* point_out,
                   float* weight, float* bias, int in_channls, int out_channls) {
    int out_idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Register accumulator instead of repeated global read-modify-write.
    float acc = 0.0f;
    for (int i = 0; i < in_channls; i++) {
        int in_idx = i + blockIdx.x * in_channls;
        int weight_idx = i + threadIdx.x * in_channls;
        acc += point_in[in_idx] * weight[weight_idx];
    }
    point_out[out_idx] = acc + bias[threadIdx.x];
}
//todo feat.bmm
// Batched matrix multiply of per-point features with a per-batch transform
// `trans`. From the indexing: blockDim.x = feature width, blockIdx.x = point,
// blockIdx.y = batch — TODO confirm at call site.
__global__ void feat_bmm(float* point_in, float* point_out, float* trans) {
    int out_idx = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x;
    // Register accumulator instead of repeated global read-modify-write.
    float acc = 0.0f;
    for (int i = 0; i < blockDim.x; i++) {
        int in_idx = i + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x;
        int weight_idx = threadIdx.x + (i + blockIdx.y * blockDim.x) * blockDim.x;
        acc += point_in[in_idx] * trans[weight_idx];
    }
    point_out[out_idx] = acc;
}
// Resample every cloud (a flat array of x,y,z triples) to exactly num_point
// points: random subsampling without replacement when too many, random
// duplication of existing points when too few. Clouds already at num_point
// (or empty) are left untouched. Sampling is nondeterministic (random_device).
void process_points(std::vector<std::vector<float>>& list_of_points, int num_point) {
    std::random_device rd;
    std::mt19937 gen(rd());
    for (auto& points : list_of_points) {
        int n = static_cast<int>(points.size() / 3);
        if (n > num_point) {
            // Pick num_point distinct indices via a shuffled index array.
            std::vector<int> indices(n);
            std::iota(indices.begin(), indices.end(), 0);
            std::shuffle(indices.begin(), indices.end(), gen);
            std::vector<float> new_points;
            new_points.reserve(num_point * 3);
            for (int i = 0; i < num_point; ++i) {
                int idx = indices[i] * 3;
                new_points.push_back(points[idx]);
                new_points.push_back(points[idx + 1]);
                new_points.push_back(points[idx + 2]);
            }
            points = std::move(new_points);
        } else if (n > 0 && n < num_point) {
            // Bug fix: the original tested `n < num_point*3`, which made every
            // cloud with num_point <= n < 3*num_point take this branch and be
            // pointlessly copied. Also guard n == 0: uniform_int_distribution
            // with an empty range (0, -1) is undefined behavior.
            std::vector<float> new_points(points.begin(), points.end());
            std::uniform_int_distribution<> dis(0, n - 1);
            while (static_cast<int>(new_points.size() / 3) < num_point) {
                int idx = dis(gen) * 3;
                new_points.push_back(new_points[idx]);
                new_points.push_back(new_points[idx + 1]);
                new_points.push_back(new_points[idx + 2]);
            }
            points = std::move(new_points);
        }
    }
}

// For each of `batch` rows of x (row-major [batch, n]) compute a numerically
// stable log-softmax and return the index of the maximum per row.
// Fix: the original used C-style VLAs (float y[n*batch] etc.), which are a
// compiler extension, not standard C++; replaced with std::vector. The four
// separate batch loops are fused into one pass per row.
std::vector<int> log_softmax_and_argmax(float* x, int batch, int n = 10) {
    std::vector<int> result(batch);
    std::vector<float> y(static_cast<size_t>(n) * batch);
    for (int b = 0; b < batch; b++) {
        // Subtract the row max before exp for numerical stability.
        float max_val = -INFINITY;
        for (int i = 0; i < n; ++i) {
            max_val = std::max(max_val, x[i + b * n]);
        }
        float sum = 0.0f;
        for (int i = 0; i < n; ++i) {
            sum += std::exp(x[i + b * n] - max_val);
        }
        for (int i = 0; i < n; ++i) {
            y[i + b * n] = std::log(std::exp(x[i + b * n] - max_val) / sum);
        }
        // Argmax over the log-softmax row (same index as argmax over x).
        int pred = 0;
        for (int i = 1; i < n; ++i) {
            if (y[i + b * n] > y[pred + b * n]) {
                pred = i;
            }
        }
        result[b] = pred;
    }
    return result;
}
int main(int argc, char *argv[]) {
        
    //todo read params and weight
    initDevice(0);
    auto params = read_params("./weight");
    std::string file_path = "./data/test_point_clouds.h5";
    std::vector<std::vector<float>> list_of_points;
    std::vector<int> list_of_labels;
    read_h5_file(file_path, list_of_points, list_of_labels);
    int kkk=0;
    int n_points= list_of_points[kkk].size()/3;
    int batch_size = 1;
    float* result_out_gpu=(float*)malloc(batch_size*10*sizeof(float));

    //todo prepare device memory
    float* raw_points;  // [batch, 3, n_points]
    CHECK(cudaMalloc((void**)&raw_points, batch_size*n_points*3*sizeof(float)));
    CHECK(cudaMemcpy(raw_points, list_of_points[kkk].data(), batch_size*n_points*3*sizeof(float), cudaMemcpyHostToDevice))
    

    //feat.stn.conv1
    int feat_stn_conv1_in_channls = 3;
    int feat_stn_conv1_out_channls=64;
    float* Dfeat_stn_conv1_weight;
    float* Dfeat_stn_conv1_bias;
    float* Dfeat_stn_conv1_output;
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv1_output, batch_size*feat_stn_conv1_out_channls*n_points*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv1_weight, feat_stn_conv1_in_channls*feat_stn_conv1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv1_bias, feat_stn_conv1_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_stn_conv1_weight, params["feat.stn.conv1.weight"].data(), feat_stn_conv1_in_channls*feat_stn_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_conv1_bias, params["feat.stn.conv1.bias"].data(), feat_stn_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //feat.stn.bn1
    float* Dfeat_stn_bn1_mean;
    float* Dfeat_stn_bn1_var;
    float* Dfeat_stn_bn1_gamma;
    float* Dfeat_stn_bn1_beta;
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn1_mean, feat_stn_conv1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn1_var, feat_stn_conv1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn1_gamma, feat_stn_conv1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn1_beta, feat_stn_conv1_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_stn_bn1_mean, params["feat.stn.bn1.running_mean"].data(), feat_stn_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn1_var, params["feat.stn.bn1.running_var"].data(), feat_stn_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn1_gamma, params["feat.stn.bn1.weight"].data(), feat_stn_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn1_beta, params["feat.stn.bn1.bias"].data(), feat_stn_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    // feat.stn.conv2
    int feat_stn_conv2_in_channls =64;
    int feat_stn_conv2_out_channls=128;
    float* Dfeat_stn_conv2_weight;
    float* Dfeat_stn_conv2_bias;
    float* Dfeat_stn_conv2_output;
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv2_output, batch_size*feat_stn_conv2_out_channls*n_points*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv2_weight, feat_stn_conv2_in_channls*feat_stn_conv2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv2_bias, feat_stn_conv2_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_stn_conv2_weight, params["feat.stn.conv2.weight"].data(), feat_stn_conv2_in_channls*feat_stn_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_conv2_bias, params["feat.stn.conv2.bias"].data(), feat_stn_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    // feat.stn.bn2
    float* Dfeat_stn_bn2_mean;
    float* Dfeat_stn_bn2_var;
    float* Dfeat_stn_bn2_gamma;
    float* Dfeat_stn_bn2_beta;
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn2_mean, feat_stn_conv2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn2_var, feat_stn_conv2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn2_gamma, feat_stn_conv2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn2_beta, feat_stn_conv2_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_stn_bn2_mean, params["feat.stn.bn2.running_mean"].data(), feat_stn_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn2_var, params["feat.stn.bn2.running_var"].data(), feat_stn_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn2_gamma, params["feat.stn.bn2.weight"].data(), feat_stn_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn2_beta, params["feat.stn.bn2.bias"].data(), feat_stn_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //feat.stn.conv3
    int feat_stn_conv3_in_channls =128;
    int feat_stn_conv3_out_channls=1024;
    float* Dfeat_stn_conv3_weight;
    float* Dfeat_stn_conv3_bias;
    float* Dfeat_stn_conv3_output;
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv3_output, batch_size*feat_stn_conv3_out_channls*n_points*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv3_weight, feat_stn_conv3_in_channls*feat_stn_conv3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_conv3_bias, feat_stn_conv3_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_stn_conv3_weight, params["feat.stn.conv3.weight"].data(), feat_stn_conv3_in_channls*feat_stn_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_conv3_bias, params["feat.stn.conv3.bias"].data(), feat_stn_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //feat.stn.bn3
    float* Dfeat_stn_bn3_mean;
    float* Dfeat_stn_bn3_var;
    float* Dfeat_stn_bn3_gamma;
    float* Dfeat_stn_bn3_beta;
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn3_mean, feat_stn_conv3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn3_var, feat_stn_conv3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn3_gamma, feat_stn_conv3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn3_beta, feat_stn_conv3_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_stn_bn3_mean, params["feat.stn.bn3.running_mean"].data(), feat_stn_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn3_var, params["feat.stn.bn3.running_var"].data(), feat_stn_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn3_gamma, params["feat.stn.bn3.weight"].data(), feat_stn_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn3_beta, params["feat.stn.bn3.bias"].data(), feat_stn_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //feat.stn.max
    float* Dfeat_stn_max_output;
    CHECK(cudaMalloc((void**)&Dfeat_stn_max_output, batch_size*feat_stn_conv3_out_channls*sizeof(float)));    

    //feat.stn.fc1
    int feat_stn_fc1_in_channls =1024;
    int feat_stn_fc1_out_channls=512;
    float* Dfeat_stn_fc1_weight;
    float* Dfeat_stn_fc1_bias;
    float* Dfeat_stn_fc1_output;
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc1_output, batch_size*feat_stn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc1_weight, feat_stn_fc1_in_channls*feat_stn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc1_bias, feat_stn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_stn_fc1_weight, params["feat.stn.fc1.weight"].data(), feat_stn_fc1_in_channls*feat_stn_fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_fc1_bias, params["feat.stn.fc1.bias"].data(), feat_stn_fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //feat.stn.bn4
    float* Dfeat_stn_bn4_mean;
    float* Dfeat_stn_bn4_var;
    float* Dfeat_stn_bn4_gamma;
    float* Dfeat_stn_bn4_beta;
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn4_mean, feat_stn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn4_var, feat_stn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn4_gamma, feat_stn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn4_beta, feat_stn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_stn_bn4_mean, params["feat.stn.bn4.running_mean"].data(), feat_stn_fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn4_var, params["feat.stn.bn4.running_var"].data(), feat_stn_fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn4_gamma, params["feat.stn.bn4.weight"].data(), feat_stn_fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn4_beta, params["feat.stn.bn4.bias"].data(), feat_stn_fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))


    //feat.stn.fc2
    int feat_stn_fc2_in_channls =512;
    int feat_stn_fc2_out_channls=256;
    float* Dfeat_stn_fc2_weight;
    float* Dfeat_stn_fc2_bias;
    float* Dfeat_stn_fc2_output;
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc2_output, batch_size*feat_stn_fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc2_weight, feat_stn_fc2_in_channls*feat_stn_fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc2_bias, feat_stn_fc2_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_stn_fc2_weight, params["feat.stn.fc2.weight"].data(), feat_stn_fc2_in_channls*feat_stn_fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_fc2_bias, params["feat.stn.fc2.bias"].data(), feat_stn_fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //feat.stn.bn5
    float* Dfeat_stn_bn5_mean;
    float* Dfeat_stn_bn5_var;
    float* Dfeat_stn_bn5_gamma;
    float* Dfeat_stn_bn5_beta;
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn5_mean, feat_stn_fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn5_var, feat_stn_fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn5_gamma, feat_stn_fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_bn5_beta, feat_stn_fc2_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_stn_bn5_mean, params["feat.stn.bn5.running_mean"].data(), feat_stn_fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn5_var, params["feat.stn.bn5.running_var"].data(), feat_stn_fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn5_gamma, params["feat.stn.bn5.weight"].data(), feat_stn_fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_bn5_beta, params["feat.stn.bn5.bias"].data(), feat_stn_fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //feat.stn.fc3
    int feat_stn_fc3_in_channls =256;
    int feat_stn_fc3_out_channls=9;
    float* Dfeat_stn_fc3_weight;
    float* Dfeat_stn_fc3_bias;
    float* Dfeat_stn_fc3_output;
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc3_output, batch_size*feat_stn_fc3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc3_weight, feat_stn_fc3_in_channls*feat_stn_fc3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_stn_fc3_bias, feat_stn_fc3_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_stn_fc3_weight, params["feat.stn.fc3.weight"].data(), feat_stn_fc3_in_channls*feat_stn_fc3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_stn_fc3_bias, params["feat.stn.fc3.bias"].data(), feat_stn_fc3_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //feat.bmm.trans
    float* Dfeat_bmm_trans_output;
    CHECK(cudaMalloc((void**)&Dfeat_bmm_trans_output, batch_size*n_points*3*sizeof(float)));

    //feat.conv1
    int feat_conv1_in_channls =3;
    int feat_conv1_out_channls=64;
    float* Dfeat_conv1_weight;
    float* Dfeat_conv1_bias;
    float* Dfeat_conv1_output;
    CHECK(cudaMalloc((void**)&Dfeat_conv1_output, batch_size*feat_conv1_out_channls*n_points*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_conv1_weight, feat_conv1_in_channls*feat_conv1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_conv1_bias, feat_conv1_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_conv1_weight, params["feat.conv1.weight"].data(), feat_conv1_in_channls*feat_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_conv1_bias, params["feat.conv1.bias"].data(), feat_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //feat.bn1
    float* Dfeat_bn1_mean;
    float* Dfeat_bn1_var;
    float* Dfeat_bn1_gamma;
    float* Dfeat_bn1_beta;
    CHECK(cudaMalloc((void**)&Dfeat_bn1_mean, feat_conv1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_bn1_var, feat_conv1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_bn1_gamma, feat_conv1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_bn1_beta, feat_conv1_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_bn1_mean, params["feat.bn1.running_mean"].data(), feat_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn1_var, params["feat.bn1.running_var"].data(), feat_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn1_gamma, params["feat.bn1.weight"].data(), feat_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn1_beta, params["feat.bn1.bias"].data(), feat_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //feat.fstn.conv1
    int feat_fstn_conv1_in_channls =64;
    int feat_fstn_conv1_out_channls=64;
    float* Dfeat_fstn_conv1_weight;
    float* Dfeat_fstn_conv1_bias;
    float* Dfeat_fstn_conv1_output;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv1_output, batch_size*feat_fstn_conv1_out_channls*n_points*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv1_weight, feat_fstn_conv1_in_channls*feat_fstn_conv1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv1_bias, feat_fstn_conv1_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_fstn_conv1_weight, params["feat.fstn.conv1.weight"].data(), feat_fstn_conv1_in_channls*feat_fstn_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_conv1_bias, params["feat.fstn.conv1.bias"].data(), feat_fstn_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //feat.fstn.bn1
    float* Dfeat_fstn_bn1_mean;
    float* Dfeat_fstn_bn1_var;
    float* Dfeat_fstn_bn1_gamma;
    float* Dfeat_fstn_bn1_beta;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn1_mean, feat_fstn_conv1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn1_var, feat_fstn_conv1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn1_gamma, feat_fstn_conv1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn1_beta, feat_fstn_conv1_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_fstn_bn1_mean,  params["feat.fstn.bn1.running_mean"].data(), feat_fstn_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn1_var,   params["feat.fstn.bn1.running_var"].data(), feat_fstn_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn1_gamma, params["feat.fstn.bn1.weight"].data(), feat_fstn_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn1_beta,  params["feat.fstn.bn1.bias"].data(), feat_fstn_conv1_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //feat.fstn.conv2
    int feat_fstn_conv2_in_channls =64;
    int feat_fstn_conv2_out_channls=128;
    float* Dfeat_fstn_conv2_weight;
    float* Dfeat_fstn_conv2_bias;
    float* Dfeat_fstn_conv2_output;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv2_output, batch_size*feat_fstn_conv2_out_channls*n_points*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv2_weight, feat_fstn_conv2_in_channls*feat_fstn_conv2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv2_bias, feat_fstn_conv2_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_fstn_conv2_weight, params["feat.fstn.conv2.weight"].data(), feat_fstn_conv2_in_channls*feat_fstn_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_conv2_bias, params["feat.fstn.conv2.bias"].data(), feat_fstn_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //feat.fstn.bn2
    float* Dfeat_fstn_bn2_mean;
    float* Dfeat_fstn_bn2_var;
    float* Dfeat_fstn_bn2_gamma;
    float* Dfeat_fstn_bn2_beta;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn2_mean, feat_fstn_conv2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn2_var, feat_fstn_conv2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn2_gamma, feat_fstn_conv2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn2_beta, feat_fstn_conv2_out_channls*sizeof(float))); 
    CHECK(cudaMemcpy(Dfeat_fstn_bn2_mean, params["feat.fstn.bn2.running_mean"].data(), feat_fstn_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn2_var, params["feat.fstn.bn2.running_var"].data(), feat_fstn_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn2_gamma, params["feat.fstn.bn2.weight"].data(), feat_fstn_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn2_beta, params["feat.fstn.bn2.bias"].data(), feat_fstn_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //feat.fstn.conv3
    int feat_fstn_conv3_in_channls =128;
    int feat_fstn_conv3_out_channls=1024;
    float* Dfeat_fstn_conv3_weight;
    float* Dfeat_fstn_conv3_bias;
    float* Dfeat_fstn_conv3_output;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv3_output, batch_size*feat_fstn_conv3_out_channls*n_points*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv3_weight, feat_fstn_conv3_in_channls*feat_fstn_conv3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_conv3_bias, feat_fstn_conv3_out_channls*sizeof(float)));
    // Upload feat.fstn.conv3 weight/bias (device buffers allocated earlier, above this section).
    // Weight size in*out floats => a 1x1 (pointwise) convolution.
    CHECK(cudaMemcpy(Dfeat_fstn_conv3_weight, params["feat.fstn.conv3.weight"].data(), feat_fstn_conv3_in_channls*feat_fstn_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_conv3_bias, params["feat.fstn.conv3.bias"].data(), feat_fstn_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //feat.fstn.bn3 -- BatchNorm inference parameters: running mean/var plus affine gamma/beta, one float per channel
    float* Dfeat_fstn_bn3_mean;
    float* Dfeat_fstn_bn3_var;
    float* Dfeat_fstn_bn3_gamma;
    float* Dfeat_fstn_bn3_beta;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn3_mean, feat_fstn_conv3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn3_var, feat_fstn_conv3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn3_gamma, feat_fstn_conv3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn3_beta, feat_fstn_conv3_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_fstn_bn3_mean, params["feat.fstn.bn3.running_mean"].data(), feat_fstn_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn3_var, params["feat.fstn.bn3.running_var"].data(), feat_fstn_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn3_gamma, params["feat.fstn.bn3.weight"].data(), feat_fstn_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn3_beta, params["feat.fstn.bn3.bias"].data(), feat_fstn_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //feat.fstn.max -- output buffer for the per-batch max pooling over points (one value per channel)
    float* Dfeat_fstn_max_output;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_max_output, batch_size*feat_fstn_conv3_out_channls*sizeof(float)));  

    //feat.fstn.fc1 -- fully-connected layer 1024 -> 512
    int feat_fstn_fc1_in_channls =1024;
    int feat_fstn_fc1_out_channls=512;
    float* Dfeat_fstn_fc1_weight;
    float* Dfeat_fstn_fc1_bias;
    float* Dfeat_fstn_fc1_output;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc1_output, batch_size*feat_fstn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc1_weight, feat_fstn_fc1_in_channls*feat_fstn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc1_bias, feat_fstn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_fstn_fc1_weight, params["feat.fstn.fc1.weight"].data(), feat_fstn_fc1_in_channls*feat_fstn_fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_fc1_bias, params["feat.fstn.fc1.bias"].data(), feat_fstn_fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //feat.fstn.bn4 -- BatchNorm after fc1 (512 channels)
    float* Dfeat_fstn_bn4_mean;
    float* Dfeat_fstn_bn4_var;
    float* Dfeat_fstn_bn4_gamma;
    float* Dfeat_fstn_bn4_beta;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn4_mean, feat_fstn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn4_var, feat_fstn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn4_gamma, feat_fstn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn4_beta, feat_fstn_fc1_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_fstn_bn4_mean, params["feat.fstn.bn4.running_mean"].data(), feat_fstn_fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn4_var, params["feat.fstn.bn4.running_var"].data(), feat_fstn_fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn4_gamma, params["feat.fstn.bn4.weight"].data(), feat_fstn_fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn4_beta, params["feat.fstn.bn4.bias"].data(), feat_fstn_fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    
    //feat.fstn.fc2 -- fully-connected layer 512 -> 256
    int feat_fstn_fc2_in_channls =512;
    int feat_fstn_fc2_out_channls=256;
    float* Dfeat_fstn_fc2_weight;
    float* Dfeat_fstn_fc2_bias;
    float* Dfeat_fstn_fc2_output;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc2_output, batch_size*feat_fstn_fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc2_weight, feat_fstn_fc2_in_channls*feat_fstn_fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc2_bias, feat_fstn_fc2_out_channls*sizeof(float)));  
    CHECK(cudaMemcpy(Dfeat_fstn_fc2_weight, params["feat.fstn.fc2.weight"].data(), feat_fstn_fc2_in_channls*feat_fstn_fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_fc2_bias, params["feat.fstn.fc2.bias"].data(), feat_fstn_fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //feat.fstn.bn5 -- BatchNorm after fc2 (256 channels)
    float* Dfeat_fstn_bn5_mean;
    float* Dfeat_fstn_bn5_var;
    float* Dfeat_fstn_bn5_gamma;
    float* Dfeat_fstn_bn5_beta;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn5_mean, feat_fstn_fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn5_var, feat_fstn_fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn5_gamma, feat_fstn_fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_bn5_beta, feat_fstn_fc2_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_fstn_bn5_mean, params["feat.fstn.bn5.running_mean"].data(), feat_fstn_fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn5_var, params["feat.fstn.bn5.running_var"].data(), feat_fstn_fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn5_gamma, params["feat.fstn.bn5.weight"].data(), feat_fstn_fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_bn5_beta, params["feat.fstn.bn5.bias"].data(), feat_fstn_fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    
    //feat.fstn.fc3 -- final FC of the feature STN: 256 -> 64*64 (flattened 64x64 transform matrix per batch element)
    int feat_fstn_fc3_in_channls =256;
    int feat_fstn_fc3_out_channls=64*64;
    float* Dfeat_fstn_fc3_weight;
    float* Dfeat_fstn_fc3_bias;
    float* Dfeat_fstn_fc3_output;
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc3_output, batch_size*feat_fstn_fc3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc3_weight, feat_fstn_fc3_in_channls*feat_fstn_fc3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_fstn_fc3_bias, feat_fstn_fc3_out_channls*sizeof(float)));  
    CHECK(cudaMemcpy(Dfeat_fstn_fc3_weight, params["feat.fstn.fc3.weight"].data(), feat_fstn_fc3_in_channls*feat_fstn_fc3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_fstn_fc3_bias, params["feat.fstn.fc3.bias"].data(), feat_fstn_fc3_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //feat.bmm1.trans.feat -- output of applying the 64x64 feature transform to the per-point features
    float* Dfeat_bmm_trans_feat_output;
    CHECK(cudaMalloc((void**)&Dfeat_bmm_trans_feat_output, batch_size*n_points*64*sizeof(float)));

    //feat.conv2 -- pointwise conv 64 -> 128 (weight size in*out floats => 1x1 conv)
    int feat_conv2_in_channls =64;
    int feat_conv2_out_channls=128;
    float* Dfeat_conv2_weight;
    float* Dfeat_conv2_bias;
    float* Dfeat_conv2_output;
    CHECK(cudaMalloc((void**)&Dfeat_conv2_output, batch_size*feat_conv2_out_channls*n_points*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_conv2_weight, feat_conv2_in_channls*feat_conv2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_conv2_bias, feat_conv2_out_channls*sizeof(float)));  
    CHECK(cudaMemcpy(Dfeat_conv2_weight, params["feat.conv2.weight"].data(), feat_conv2_in_channls*feat_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_conv2_bias, params["feat.conv2.bias"].data(), feat_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //feat.bn2 -- BatchNorm inference params for conv2 (128 channels)
    float* Dfeat_bn2_mean;
    float* Dfeat_bn2_var;
    float* Dfeat_bn2_gamma;
    float* Dfeat_bn2_beta;
    CHECK(cudaMalloc((void**)&Dfeat_bn2_mean, feat_conv2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_bn2_var, feat_conv2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_bn2_gamma, feat_conv2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_bn2_beta, feat_conv2_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_bn2_mean, params["feat.bn2.running_mean"].data(), feat_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn2_var, params["feat.bn2.running_var"].data(), feat_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn2_gamma, params["feat.bn2.weight"].data(), feat_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn2_beta, params["feat.bn2.bias"].data(), feat_conv2_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //feat.conv3 -- pointwise conv 128 -> 1024 (global feature width)
    int feat_conv3_in_channls =128;
    int feat_conv3_out_channls=1024;
    float* Dfeat_conv3_weight;
    float* Dfeat_conv3_bias;
    float* Dfeat_conv3_output;
    CHECK(cudaMalloc((void**)&Dfeat_conv3_output, batch_size*feat_conv3_out_channls*n_points*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_conv3_weight, feat_conv3_in_channls*feat_conv3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_conv3_bias, feat_conv3_out_channls*sizeof(float)));  
    CHECK(cudaMemcpy(Dfeat_conv3_weight, params["feat.conv3.weight"].data(), feat_conv3_in_channls*feat_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_conv3_bias, params["feat.conv3.bias"].data(), feat_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //feat.bn3 -- BatchNorm inference params for conv3 (1024 channels)
    float* Dfeat_bn3_mean;
    float* Dfeat_bn3_var;
    float* Dfeat_bn3_gamma;
    float* Dfeat_bn3_beta;
    CHECK(cudaMalloc((void**)&Dfeat_bn3_mean, feat_conv3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_bn3_var, feat_conv3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_bn3_gamma, feat_conv3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfeat_bn3_beta, feat_conv3_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dfeat_bn3_mean, params["feat.bn3.running_mean"].data(), feat_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn3_var, params["feat.bn3.running_var"].data(), feat_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn3_gamma, params["feat.bn3.weight"].data(), feat_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfeat_bn3_beta, params["feat.bn3.bias"].data(), feat_conv3_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //feat.max -- global max-pool over points: batch_size x 1024 global feature vector
    float* Dfeat_max_output;
    CHECK(cudaMalloc((void**)&Dfeat_max_output, batch_size*feat_conv3_out_channls*sizeof(float)));

    //fc1 -- classifier head, fully-connected 1024 -> 512
    int fc1_in_channls =1024;
    int fc1_out_channls=512;
    float* Dfc1_weight;
    float* Dfc1_bias;
    float* Dfc1_output;
    CHECK(cudaMalloc((void**)&Dfc1_output, batch_size*fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfc1_weight, fc1_in_channls*fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfc1_bias, fc1_out_channls*sizeof(float)));  
    CHECK(cudaMemcpy(Dfc1_weight, params["fc1.weight"].data(), fc1_in_channls*fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfc1_bias, params["fc1.bias"].data(), fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //bn1 -- BatchNorm after fc1 (512 channels)
    float* Dbn1_mean;
    float* Dbn1_var;
    float* Dbn1_gamma;
    float* Dbn1_beta;
    CHECK(cudaMalloc((void**)&Dbn1_mean, fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dbn1_var, fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dbn1_gamma, fc1_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dbn1_beta, fc1_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dbn1_mean, params["bn1.running_mean"].data(), fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dbn1_var, params["bn1.running_var"].data(), fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dbn1_gamma, params["bn1.weight"].data(), fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dbn1_beta, params["bn1.bias"].data(), fc1_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //fc2 -- fully-connected 512 -> 256
    int fc2_in_channls =512;
    int fc2_out_channls=256;
    float* Dfc2_weight;
    float* Dfc2_bias;
    float* Dfc2_output;
    CHECK(cudaMalloc((void**)&Dfc2_output, batch_size*fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfc2_weight, fc2_in_channls*fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfc2_bias, fc2_out_channls*sizeof(float)));  
    CHECK(cudaMemcpy(Dfc2_weight, params["fc2.weight"].data(), fc2_in_channls*fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfc2_bias, params["fc2.bias"].data(), fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    //bn2 -- BatchNorm after fc2 (256 channels)
    float* Dbn2_mean;
    float* Dbn2_var;
    float* Dbn2_gamma;
    float* Dbn2_beta;
    CHECK(cudaMalloc((void**)&Dbn2_mean, fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dbn2_var, fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dbn2_gamma, fc2_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dbn2_beta, fc2_out_channls*sizeof(float)));
    CHECK(cudaMemcpy(Dbn2_mean, params["bn2.running_mean"].data(), fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dbn2_var, params["bn2.running_var"].data(), fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dbn2_gamma, params["bn2.weight"].data(), fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dbn2_beta, params["bn2.bias"].data(), fc2_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //fc3 -- final fully-connected 256 -> 10 (per-class logits; fed to log_softmax_and_argmax below)
    int fc3_in_channls =256;
    int fc3_out_channls=10;
    float* Dfc3_weight;
    float* Dfc3_bias;
    float* Dfc3_output;
    CHECK(cudaMalloc((void**)&Dfc3_output, batch_size*fc3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfc3_weight, fc3_in_channls*fc3_out_channls*sizeof(float)));
    CHECK(cudaMalloc((void**)&Dfc3_bias, fc3_out_channls*sizeof(float)));  
    CHECK(cudaMemcpy(Dfc3_weight, params["fc3.weight"].data(), fc3_in_channls*fc3_out_channls*sizeof(float), cudaMemcpyHostToDevice))
    CHECK(cudaMemcpy(Dfc3_bias, params["fc3.bias"].data(), fc3_out_channls*sizeof(float), cudaMemcpyHostToDevice))

    //todo kernel
    // Launch configurations. Pattern used throughout:
    //   conv-style kernels: grid = (n_points, batch_size), block = out_channls
    //                       (one thread per output channel of one point)
    //   fc/max-style kernels: grid = batch_size, block = out_channls
    // NOTE(review): block sizes equal the channel counts, so the 1024-wide
    // layers sit exactly at the 1024-threads-per-block hardware limit;
    // any wider layer would fail to launch. Assumes the kernels (defined
    // elsewhere in this file) index blockIdx/threadIdx accordingly.
    dim3 feat_stn_conv1_block_size(feat_stn_conv1_out_channls);
    dim3 feat_stn_conv1_grid_size(n_points,batch_size);
    dim3 feat_stn_conv2_block_size(feat_stn_conv2_out_channls);
    dim3 feat_stn_conv2_grid_size(n_points,batch_size);
    dim3 feat_stn_conv3_block_size(feat_stn_conv3_out_channls);
    dim3 feat_stn_conv3_grid_size(n_points,batch_size);
    dim3 feat_stn_max_block_size(feat_stn_conv3_out_channls);
    dim3 feat_stn_max_grid_size(batch_size);
    dim3 feat_stn_fc1_block_size(feat_stn_fc1_out_channls);
    dim3 feat_stn_fc1_grid_size(batch_size);
    dim3 feat_stn_fc2_block_size(feat_stn_fc2_out_channls);
    dim3 feat_stn_fc2_grid_size(batch_size);
    dim3 feat_stn_fc3_block_size(feat_stn_fc3_out_channls);
    dim3 feat_stn_fc3_grid_size(batch_size);
    dim3 feat_bmm_trans_block_size(3);
    dim3 feat_bmm_trans_grid_size(n_points,batch_size);
    dim3 feat_conv1_block_size(feat_conv1_out_channls);
    dim3 feat_conv1_grid_size(n_points,batch_size);
    dim3 feat_fstn_conv1_block_size(feat_fstn_conv1_out_channls);
    dim3 feat_fstn_conv1_grid_size(n_points,batch_size);
    dim3 feat_fstn_conv2_block_size(feat_fstn_conv2_out_channls);
    dim3 feat_fstn_conv2_grid_size(n_points,batch_size);
    dim3 feat_fstn_conv3_block_size(feat_fstn_conv3_out_channls);
    dim3 feat_fstn_conv3_grid_size(n_points,batch_size);
    dim3 feat_fstn_max_block_size(feat_fstn_conv3_out_channls);
    dim3 feat_fstn_max_grid_size(batch_size);
    dim3 feat_fstn_fc1_block_size(feat_fstn_fc1_out_channls);
    dim3 feat_fstn_fc1_grid_size(batch_size);
    dim3 feat_fstn_fc2_block_size(feat_fstn_fc2_out_channls);
    dim3 feat_fstn_fc2_grid_size(batch_size);
    // fstn.fc3 emits 64*64 = 4096 values per batch element, split as
    // 4 blocks x 1024 threads (4096 = feat_fstn_fc3_out_channls).
    dim3 feat_fstn_fc3_block_size(1024);
    dim3 feat_fstn_fc3_grid_size(4,batch_size);
    dim3 feat_bmm_trans_feat_block_size(64);
    dim3 feat_bmm_trans_feat_grid_size(n_points,batch_size);
    dim3 feat_conv2_block_size(feat_conv2_out_channls);
    dim3 feat_conv2_grid_size(n_points,batch_size);
    dim3 feat_conv3_block_size(feat_conv3_out_channls);
    dim3 feat_conv3_grid_size(n_points,batch_size);
    dim3 feat_max_block_size(feat_conv3_out_channls);
    dim3 feat_max_grid_size( batch_size);
    dim3 feat_fc1_block_size(fc1_out_channls);
    dim3 feat_fc1_grid_size(batch_size);
    dim3 feat_fc2_block_size(fc2_out_channls);
    dim3 feat_fc2_grid_size(batch_size);
    dim3 feat_fc3_block_size(fc3_out_channls);
    dim3 feat_fc3_grid_size(batch_size);

    // Timed inference loop: runs the full forward pass n_iters times and
    // reports the average wall-clock seconds per iteration.
    // NOTE(review): the measurement includes the blocking device-to-host
    // copy, the host-side log-softmax/argmax and console printing, so it is
    // end-to-end latency, not pure kernel time.
    const int n_iters = 200;  // single source of truth (was duplicated as a hard-coded 200.0 in the average)
    auto start = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < n_iters; i++) {

        // ---- feat.stn: input spatial transform network (3 -> 64 -> 128 -> 1024, max, fc 512 -> 256 -> 3x3) ----
        conv_bn_relu<<<feat_stn_conv1_grid_size, feat_stn_conv1_block_size>>>(
                    raw_points, Dfeat_stn_conv1_output,
                    Dfeat_stn_conv1_weight, Dfeat_stn_conv1_bias, feat_stn_conv1_in_channls,
                    Dfeat_stn_bn1_mean, Dfeat_stn_bn1_var, Dfeat_stn_bn1_gamma, Dfeat_stn_bn1_beta);
        conv_bn_relu<<<feat_stn_conv2_grid_size, feat_stn_conv2_block_size>>>(
                    Dfeat_stn_conv1_output, Dfeat_stn_conv2_output,
                    Dfeat_stn_conv2_weight, Dfeat_stn_conv2_bias, feat_stn_conv2_in_channls,
                    Dfeat_stn_bn2_mean, Dfeat_stn_bn2_var, Dfeat_stn_bn2_gamma, Dfeat_stn_bn2_beta);
        conv_bn_relu<<<feat_stn_conv3_grid_size, feat_stn_conv3_block_size>>>(
                    Dfeat_stn_conv2_output, Dfeat_stn_conv3_output,
                    Dfeat_stn_conv3_weight, Dfeat_stn_conv3_bias, feat_stn_conv3_in_channls,
                    Dfeat_stn_bn3_mean, Dfeat_stn_bn3_var, Dfeat_stn_bn3_gamma, Dfeat_stn_bn3_beta);
        my_max<<<feat_stn_max_grid_size, feat_stn_max_block_size>>>(Dfeat_stn_conv3_output, Dfeat_stn_max_output, n_points);
        fc_bn_relu<<<feat_stn_fc1_grid_size, feat_stn_fc1_block_size>>>(
                    Dfeat_stn_max_output, Dfeat_stn_fc1_output,
                    Dfeat_stn_fc1_weight, Dfeat_stn_fc1_bias, feat_stn_fc1_in_channls, feat_stn_fc1_out_channls,
                    Dfeat_stn_bn4_mean, Dfeat_stn_bn4_var, Dfeat_stn_bn4_gamma, Dfeat_stn_bn4_beta);
        fc_bn_relu<<<feat_stn_fc2_grid_size, feat_stn_fc2_block_size>>>(
                    Dfeat_stn_fc1_output, Dfeat_stn_fc2_output,
                    Dfeat_stn_fc2_weight, Dfeat_stn_fc2_bias, feat_stn_fc2_in_channls, feat_stn_fc2_out_channls,
                    Dfeat_stn_bn5_mean, Dfeat_stn_bn5_var, Dfeat_stn_bn5_gamma, Dfeat_stn_bn5_beta);
        // fc_eye: fc3 plus identity added to the output (transform is initialized near identity).
        fc_eye<<<feat_stn_fc3_grid_size, feat_stn_fc3_block_size>>>(
                    Dfeat_stn_fc2_output, Dfeat_stn_fc3_output,
                    Dfeat_stn_fc3_weight, Dfeat_stn_fc3_bias, feat_stn_fc3_in_channls, feat_stn_fc3_out_channls);
        // Apply the predicted 3x3 transform to the raw points.
        feat_bmm<<<feat_bmm_trans_grid_size, feat_bmm_trans_block_size>>>(raw_points, Dfeat_bmm_trans_output, Dfeat_stn_fc3_output);

        // ---- backbone conv1 + feature STN (64x64 transform) ----
        conv_bn_relu<<<feat_conv1_grid_size, feat_conv1_block_size>>>(
                    Dfeat_bmm_trans_output, Dfeat_conv1_output,
                    Dfeat_conv1_weight, Dfeat_conv1_bias, feat_conv1_in_channls,
                    Dfeat_bn1_mean, Dfeat_bn1_var, Dfeat_bn1_gamma, Dfeat_bn1_beta);
        conv_bn_relu<<<feat_fstn_conv1_grid_size, feat_fstn_conv1_block_size>>>(
                    Dfeat_conv1_output, Dfeat_fstn_conv1_output,
                    Dfeat_fstn_conv1_weight, Dfeat_fstn_conv1_bias, feat_fstn_conv1_in_channls,
                    Dfeat_fstn_bn1_mean, Dfeat_fstn_bn1_var, Dfeat_fstn_bn1_gamma, Dfeat_fstn_bn1_beta);
        conv_bn_relu<<<feat_fstn_conv2_grid_size, feat_fstn_conv2_block_size>>>(
                    Dfeat_fstn_conv1_output, Dfeat_fstn_conv2_output,
                    Dfeat_fstn_conv2_weight, Dfeat_fstn_conv2_bias, feat_fstn_conv2_in_channls,
                    Dfeat_fstn_bn2_mean, Dfeat_fstn_bn2_var, Dfeat_fstn_bn2_gamma, Dfeat_fstn_bn2_beta);
        conv_bn_relu<<<feat_fstn_conv3_grid_size, feat_fstn_conv3_block_size>>>(
                    Dfeat_fstn_conv2_output, Dfeat_fstn_conv3_output,
                    Dfeat_fstn_conv3_weight, Dfeat_fstn_conv3_bias, feat_fstn_conv3_in_channls,
                    Dfeat_fstn_bn3_mean, Dfeat_fstn_bn3_var, Dfeat_fstn_bn3_gamma, Dfeat_fstn_bn3_beta);
        my_max<<<feat_fstn_max_grid_size, feat_fstn_max_block_size>>>(
                    Dfeat_fstn_conv3_output, Dfeat_fstn_max_output, n_points);
        fc_bn_relu<<<feat_fstn_fc1_grid_size, feat_fstn_fc1_block_size>>>(
                    Dfeat_fstn_max_output, Dfeat_fstn_fc1_output,
                    Dfeat_fstn_fc1_weight, Dfeat_fstn_fc1_bias, feat_fstn_fc1_in_channls, feat_fstn_fc1_out_channls,
                    Dfeat_fstn_bn4_mean, Dfeat_fstn_bn4_var, Dfeat_fstn_bn4_gamma, Dfeat_fstn_bn4_beta);
        fc_bn_relu<<<feat_fstn_fc2_grid_size, feat_fstn_fc2_block_size>>>(
                    Dfeat_fstn_fc1_output, Dfeat_fstn_fc2_output,
                    Dfeat_fstn_fc2_weight, Dfeat_fstn_fc2_bias, feat_fstn_fc2_in_channls, feat_fstn_fc2_out_channls,
                    Dfeat_fstn_bn5_mean, Dfeat_fstn_bn5_var, Dfeat_fstn_bn5_gamma, Dfeat_fstn_bn5_beta);
        feat_fstn_fc3<<<feat_fstn_fc3_grid_size, feat_fstn_fc3_block_size>>>(
                    Dfeat_fstn_fc2_output, Dfeat_fstn_fc3_output,
                    Dfeat_fstn_fc3_weight, Dfeat_fstn_fc3_bias, feat_fstn_fc3_in_channls);//trans_feat
        // Apply the 64x64 feature transform to the conv1 features.
        feat_bmm<<<feat_bmm_trans_feat_grid_size, feat_bmm_trans_feat_block_size>>>(Dfeat_conv1_output,
                    Dfeat_bmm_trans_feat_output, Dfeat_fstn_fc3_output);

        // ---- backbone conv2/conv3, global max-pool, classifier head ----
        conv_bn_relu<<<feat_conv2_grid_size, feat_conv2_block_size>>>(
                    Dfeat_bmm_trans_feat_output, Dfeat_conv2_output,
                    Dfeat_conv2_weight, Dfeat_conv2_bias, feat_conv2_in_channls,
                    Dfeat_bn2_mean, Dfeat_bn2_var, Dfeat_bn2_gamma, Dfeat_bn2_beta);
        conv_bn<<<feat_conv3_grid_size, feat_conv3_block_size>>>(   // no ReLU before the global max-pool
                    Dfeat_conv2_output, Dfeat_conv3_output,
                    Dfeat_conv3_weight, Dfeat_conv3_bias, feat_conv3_in_channls,
                    Dfeat_bn3_mean, Dfeat_bn3_var, Dfeat_bn3_gamma, Dfeat_bn3_beta);
        my_max<<<feat_max_grid_size, feat_max_block_size>>>(
                    Dfeat_conv3_output, Dfeat_max_output, n_points);
        fc_bn_relu<<<feat_fc1_grid_size, feat_fc1_block_size>>>(
                    Dfeat_max_output, Dfc1_output,
                    Dfc1_weight, Dfc1_bias, fc1_in_channls, fc1_out_channls,
                    Dbn1_mean, Dbn1_var, Dbn1_gamma, Dbn1_beta);
        fc_bn_relu<<<feat_fc2_grid_size, feat_fc2_block_size>>>(
                    Dfc1_output, Dfc2_output,
                    Dfc2_weight, Dfc2_bias, fc2_in_channls, fc2_out_channls,
                    Dbn2_mean, Dbn2_var, Dbn2_gamma, Dbn2_beta);
        fc<<<feat_fc3_grid_size, feat_fc3_block_size>>>(
                    Dfc2_output, Dfc3_output,
                    Dfc3_weight, Dfc3_bias, fc3_in_channls, fc3_out_channls);

        // Catch launch-configuration errors immediately; asynchronous
        // execution errors surface at the blocking cudaMemcpy below.
        CHECK(cudaGetLastError());

        // Blocking copy of the logits to the host; this also synchronizes
        // with the kernels above. (The previous memset of result_out_gpu
        // was redundant: the copy overwrites the same batch_size*10 floats.)
        CHECK(cudaMemcpy(result_out_gpu, Dfc3_output, batch_size*10*sizeof(float), cudaMemcpyDeviceToHost));

        std::vector<int> prednum = log_softmax_and_argmax(result_out_gpu, batch_size);
        if (prednum[0] == list_of_labels[kkk])
            printf(GREEN " √ " RESET);
        else
            printf(RED " ×" RESET);

    }
    CHECK(cudaDeviceSynchronize());  // defensive: also checked, unlike before
    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> diff = end - start;
    // Average wall-clock seconds per forward pass.
    std::cout << std::fixed << std::setprecision(4) << diff.count()/n_iters;
    return 0;
}