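// Letterbox preprocessing: scale the frame so it fits inside INPUT_W x INPUT_H without
// changing its aspect ratio, then pad the remainder with gray (114, 114, 114), the same
// padding value used by the official YOLOX preprocessing.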
Mat static_resize(Mat& img) {
    float r = min(INPUT_W / (img.cols * 1.0), INPUT_H / (img.rows * 1.0));
    // r = std::min(r, 1.0f);
    int unpad_w = r * img.cols;
    int unpad_h = r * img.rows;
    Mat re(unpad_h, unpad_w, CV_8UC3);
    resize(img, re, re.size());
    Mat out(INPUT_H, INPUT_W, CV_8UC3, Scalar(114, 114, 114));
    re.copyTo(out(Rect(0, 0, re.cols, re.rows)));
    return out;
}
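// Custom ncnn layer implementing the YOLOv5 "Focus" slicing: every 2x2 block of the
// input is split across four channel groups, so spatial resolution is halved while the
// channel count is multiplied by four. YOLOX reuses this op, and it must be registered
// as a custom layer because it is not built into ncnn.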
// YOLOX uses the same Focus layer as yolov5
class YoloV5Focus : public ncnn::Layer
{
public:
    YoloV5Focus()
    {
        one_blob_only = true;
    }

    virtual int forward(const ncnn::Mat& bottom_blob, ncnn::Mat& top_blob, const ncnn::Option& opt) const
    {
        int w = bottom_blob.w;
        int h = bottom_blob.h;
        int channels = bottom_blob.c;

        int outw = w / 2;
        int outh = h / 2;
        int outc = channels * 4;

        top_blob.create(outw, outh, outc, 4u, 1, opt.blob_allocator);
        if (top_blob.empty())
            return -100;

        for (int p = 0; p < outc; p++)
        {
            const float* ptr = bottom_blob.channel(p % channels).row((p / channels) % 2) + ((p / channels) / 2);
            float* outptr = top_blob.channel(p);

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    *outptr = *ptr;

                    outptr += 1;
                    ptr += 2;
                }

                ptr += w;
            }
        }

        return 0;
    }
};

DEFINE_LAYER_CREATOR(YoloV5Focus)
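// One entry per output cell of the detection head: the cell coordinates (grid0, grid1)
// and the stride of the feature map that cell belongs to.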
struct GridAndStride
{
    int grid0;
    int grid1;
    int stride;
};

static inline float intersection_area(const Object& a, const Object& b)
{
    cv::Rect_<float> inter = a.rect & b.rect;
    return inter.area();
}
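// In-place quicksort of detections by confidence, highest first. The NMS below relies on
// this ordering so that the strongest box in each cluster is kept.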
static void qsort_descent_inplace(std::vector<Object>& faceobjects, int left, int right)
{
    int i = left;
    int j = right;
    float p = faceobjects[(left + right) / 2].prob;

    while (i <= j)
    {
        while (faceobjects[i].prob > p)
            i++;

        while (faceobjects[j].prob < p)
            j--;

        if (i <= j)
        {
            // swap
            std::swap(faceobjects[i], faceobjects[j]);

            i++;
            j--;
        }
    }

    if (left < j) qsort_descent_inplace(faceobjects, left, j);
    if (i < right) qsort_descent_inplace(faceobjects, i, right);
}

static void qsort_descent_inplace(std::vector<Object>& objects)
{
    if (objects.empty())
        return;

    qsort_descent_inplace(objects, 0, objects.size() - 1);
}
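// Greedy non-maximum suppression over detections sorted by score: walk the list from
// highest to lowest and keep a box only if its IoU with every already-kept box stays at
// or below nms_threshold.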
static void nms_sorted_bboxes(const std::vector<Object>& faceobjects, std::vector<int>& picked, float nms_threshold)
{
    picked.clear();

    const int n = faceobjects.size();

    std::vector<float> areas(n);
    for (int i = 0; i < n; i++)
    {
        areas[i] = faceobjects[i].rect.area();
    }

    for (int i = 0; i < n; i++)
    {
        const Object& a = faceobjects[i];

        int keep = 1;
        for (int j = 0; j < (int)picked.size(); j++)
        {
            const Object& b = faceobjects[picked[j]];

            // intersection over union
            float inter_area = intersection_area(a, b);
            float union_area = areas[i] + areas[picked[j]] - inter_area;
            // float IoU = inter_area / union_area
            if (inter_area / union_area > nms_threshold)
                keep = 0;
        }

        if (keep)
            picked.push_back(i);
    }
}
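// Enumerate every cell of every feature map: for each stride s the head produces a
// (target_w / s) x (target_h / s) grid. The grids are flattened in the same order as the
// rows of the network output, so the decoder below can walk them in lockstep.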
static void generate_grids_and_stride(const int target_w, const int target_h, std::vector<int>& strides, std::vector<GridAndStride>& grid_strides)
{
    for (int i = 0; i < (int)strides.size(); i++)
    {
        int stride = strides[i];
        int num_grid_w = target_w / stride;
        int num_grid_h = target_h / stride;
        for (int g1 = 0; g1 < num_grid_h; g1++)
        {
            for (int g0 = 0; g0 < num_grid_w; g0++)
            {
                GridAndStride gs;
                gs.grid0 = g0;
                gs.grid1 = g1;
                gs.stride = stride;
                grid_strides.push_back(gs);
            }
        }
    }
}
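// Decode the raw head output into candidate boxes. Each output row holds
// [x, y, w, h, objectness, class scores...] for one grid cell: centers are offset by the
// cell coordinates and scaled by the stride, sizes are exp()-scaled by the stride, and
// the final confidence is objectness * class score. One Object is emitted per
// (cell, class) pair whose confidence exceeds prob_threshold.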
static void generate_yolox_proposals(std::vector<GridAndStride> grid_strides, const ncnn::Mat& feat_blob, float prob_threshold, std::vector<Object>& objects)
{
    const int num_grid = feat_blob.h;
    const int num_class = feat_blob.w - 5;
    const int num_anchors = grid_strides.size();

    const float* feat_ptr = feat_blob.channel(0);
    for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++)
    {
        const int grid0 = grid_strides[anchor_idx].grid0;
        const int grid1 = grid_strides[anchor_idx].grid1;
        const int stride = grid_strides[anchor_idx].stride;

        // yolox/models/yolo_head.py decode logic
        //  outputs[..., :2] = (outputs[..., :2] + grids) * strides
        //  outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides
        float x_center = (feat_ptr[0] + grid0) * stride;
        float y_center = (feat_ptr[1] + grid1) * stride;
        float w = exp(feat_ptr[2]) * stride;
        float h = exp(feat_ptr[3]) * stride;
        float x0 = x_center - w * 0.5f;
        float y0 = y_center - h * 0.5f;

        float box_objectness = feat_ptr[4];
        for (int class_idx = 0; class_idx < num_class; class_idx++)
        {
            float box_cls_score = feat_ptr[5 + class_idx];
            float box_prob = box_objectness * box_cls_score;
            if (box_prob > prob_threshold)
            {
                Object obj;
                obj.rect.x = x0;
                obj.rect.y = y0;
                obj.rect.width = w;
                obj.rect.height = h;
                obj.label = class_idx;
                obj.prob = box_prob;

                objects.push_back(obj);
            }
        } // class loop

        feat_ptr += feat_blob.w;
    } // point anchor loop
}
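// Run one forward pass on the padded, normalized input, decode and NMS the proposals,
// then divide the surviving boxes by the letterbox scale so their coordinates refer to
// the original frame. The bounds clipping from the upstream YOLOX demo is left commented
// out because img_w / img_h are not passed into this function.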
static int detect_yolox(ncnn::Mat& in_pad, std::vector<Object>& objects, ncnn::Extractor ex, float scale)
{
    ex.input("images", in_pad);

    std::vector<Object> proposals;

    {
        ncnn::Mat out;
        ex.extract("output", out);

        static const int stride_arr[] = {8, 16, 32}; // might have stride=64 in YOLOX
        std::vector<int> strides(stride_arr, stride_arr + sizeof(stride_arr) / sizeof(stride_arr[0]));
        std::vector<GridAndStride> grid_strides;
        generate_grids_and_stride(INPUT_W, INPUT_H, strides, grid_strides);
        generate_yolox_proposals(grid_strides, out, YOLOX_CONF_THRESH, proposals);
    }

    // sort all proposals by score from highest to lowest
    qsort_descent_inplace(proposals);

    // apply nms with nms_threshold
    std::vector<int> picked;
    nms_sorted_bboxes(proposals, picked, YOLOX_NMS_THRESH);

    int count = picked.size();

    objects.resize(count);
    for (int i = 0; i < count; i++)
    {
        objects[i] = proposals[picked[i]];

        // adjust offset to original unpadded
        float x0 = (objects[i].rect.x) / scale;
        float y0 = (objects[i].rect.y) / scale;
        float x1 = (objects[i].rect.x + objects[i].rect.width) / scale;
        float y1 = (objects[i].rect.y + objects[i].rect.height) / scale;

        // clip
        // x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f);
        // y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f);
        // x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f);
        // y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f);

        objects[i].rect.x = x0;
        objects[i].rect.y = y0;
        objects[i].rect.width = x1 - x0;
        objects[i].rect.height = y1 - y0;
    }

    return 0;
}
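// Demo entry point: open a video, run the bytetrack_s YOLOX detector on every frame,
// feed the detections to BYTETracker, draw the confirmed tracks, and write the annotated
// result to demo.mp4.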
int main(int argc, char** argv)
{
    if (argc != 2)
    {
        fprintf(stderr, "Usage: %s [videopath]\n", argv[0]);
        return -1;
    }

    ncnn::Net yolox;

    //yolox.opt.use_vulkan_compute = true;
    //yolox.opt.use_bf16_storage = true;
    yolox.opt.num_threads = 20;
    //ncnn::set_cpu_powersave(0);
    //ncnn::set_omp_dynamic(0);
    //ncnn::set_omp_num_threads(20);

    // Focus in yolov5
    yolox.register_custom_layer("YoloV5Focus", YoloV5Focus_layer_creator);

    yolox.load_param("bytetrack_s_op.param");
    yolox.load_model("bytetrack_s_op.bin");
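    // The custom layer must be registered before load_param(), because the param file
    // references the "YoloV5Focus" layer type; YoloV5Focus_layer_creator is generated by
    // the DEFINE_LAYER_CREATOR macro above.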
    ncnn::Extractor ex = yolox.create_extractor();

    const char* videopath = argv[1];
    VideoCapture cap(videopath);
    if (!cap.isOpened())
        return 0;

    int img_w = cap.get(CV_CAP_PROP_FRAME_WIDTH);
    int img_h = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
    int fps = cap.get(CV_CAP_PROP_FPS);
    long nFrame = static_cast<long>(cap.get(CV_CAP_PROP_FRAME_COUNT));
    cout << "Total frames: " << nFrame << endl;

    VideoWriter writer("demo.mp4", CV_FOURCC('m', 'p', '4', 'v'), fps, Size(img_w, img_h));

    Mat img;
    BYTETracker tracker(fps, 30);
    int num_frames = 0;
    int total_ms = 1;
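    // Despite its name, total_ms accumulates microseconds (see the duration_cast below),
    // which is why the fps printouts multiply by 1,000,000. It starts at 1 so the very
    // first printout cannot divide by zero.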
    for (;;)
    {
        if (!cap.read(img))
            break;

        num_frames++;
        if (num_frames % 20 == 0)
        {
            cout << "Processing frame " << num_frames << " (" << num_frames * 1000000 / total_ms << " fps)" << endl;
        }
        if (img.empty())
            break;

        float scale = min(INPUT_W / (img.cols * 1.0), INPUT_H / (img.rows * 1.0));
        Mat pr_img = static_resize(img);

        ncnn::Mat in_pad = ncnn::Mat::from_pixels_resize(pr_img.data, ncnn::Mat::PIXEL_BGR2RGB, INPUT_W, INPUT_H, INPUT_W, INPUT_H);

        // python pipeline normalizes a 0-1 input tensor with rgb_means = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225),
        // so for a 0-255 input image the means are scaled by 255 and the norms become 1 / (255 * std).
        const float mean_vals[3] = {255.f * 0.485f, 255.f * 0.456f, 255.f * 0.406f};
        const float norm_vals[3] = {1 / (255.f * 0.229f), 1 / (255.f * 0.224f), 1 / (255.f * 0.225f)};

        in_pad.substract_mean_normalize(mean_vals, norm_vals);

        std::vector<Object> objects;
        auto start = chrono::system_clock::now();
        //detect_yolox(img, objects);
        detect_yolox(in_pad, objects, ex, scale);
        vector<STrack> output_stracks = tracker.update(objects);
        auto end = chrono::system_clock::now();
        total_ms = total_ms + chrono::duration_cast<chrono::microseconds>(end - start).count();

        for (int i = 0; i < (int)output_stracks.size(); i++)
        {
            vector<float> tlwh = output_stracks[i].tlwh;
            bool vertical = tlwh[2] / tlwh[3] > 1.6;
            if (tlwh[2] * tlwh[3] > 20 && !vertical)
            {
                Scalar s = tracker.get_color(output_stracks[i].track_id);
                putText(img, format("%d", output_stracks[i].track_id), Point(tlwh[0], tlwh[1] - 5),
                        0, 0.6, Scalar(0, 0, 255), 2, LINE_AA);
                rectangle(img, Rect(tlwh[0], tlwh[1], tlwh[2], tlwh[3]), s, 2);
            }
        }

        putText(img, format("frame: %d fps: %d num: %d", num_frames, num_frames * 1000000 / total_ms, (int)output_stracks.size()),
                Point(0, 30), 0, 0.6, Scalar(0, 0, 255), 2, LINE_AA);
        writer.write(img);

        char c = waitKey(1);
        if (c > 0)
        {
            break;
        }
    }

    cap.release();
    cout << "FPS: " << num_frames * 1000000 / total_ms << endl;

    return 0;
}
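// Note: the model files bytetrack_s_op.param / bytetrack_s_op.bin are loaded from the
// working directory, and the annotated video is written to demo.mp4 next to them.
// BYTETracker, STrack, Object and the INPUT_W / INPUT_H / YOLOX_* constants are assumed
// to come from the ByteTrack headers included earlier in this file.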