import {
  FilesetResolver,
  ObjectDetector,
  ObjectDetectorResult
} from "@mediapipe/tasks-vision"
// A detector bound to VIDEO running mode: takes a frame plus a timestamp (ms) and returns detections.
export type VideoObjectDetector = (videoFrame: TexImageSource, timestamp: number) => Promise<ObjectDetectorResult>
// Creates a MediaPipe ObjectDetector configured for video frames and wraps it
// in a VideoObjectDetector function.
const getObjectDetector = async (): Promise<VideoObjectDetector> => {
  const vision = await FilesetResolver.forVisionTasks(
    "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@latest/wasm"
  );
  const objectDetector = await ObjectDetector.createFromOptions(vision, {
    baseOptions: {
      modelAssetPath: `https://storage.googleapis.com/mediapipe-tasks/object_detector/efficientdet_lite0_uint8.tflite`
    },
    scoreThreshold: 0.5,
    runningMode: "VIDEO"
  });
  const detector: VideoObjectDetector = async (videoFrame: TexImageSource, timestamp: number): Promise<ObjectDetectorResult> => {
    const result = objectDetector.detectForVideo(videoFrame, timestamp)
    return result
  }
  return detector
}
// Module-level cache so the detector is only created once.
const globalState: { detector?: VideoObjectDetector } = {};

// Eagerly start loading the detector at import time so the first frame is faster.
(async () => {
  globalState.detector = globalState.detector || (await getObjectDetector())
})();
export async function identifyFrame(frame: TexImageSource, timestamp: number): Promise<ObjectDetectorResult> {
  console.log("identifyFrame: loading detector..")
  globalState.detector = globalState.detector || (await getObjectDetector())
  console.log("identifyFrame: detecting..")
  return globalState.detector(frame, timestamp)
}
// to run:
// this module calls ObjectDetector.detectForVideo; the video-mode pattern is the
// same one documented for the image segmenter:
// https://developers.google.com/mediapipe/solutions/vision/image_segmenter/web_js#video
// imageSegmenter.segmentForVideo(video, startTimeMs, callbackForVideo);
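//
// Hypothetical usage sketch (not part of this module): assumes a page with a
// <video id="video"> element and a browser that supports
// requestVideoFrameCallback; names below are illustrative only.
//
// const video = document.getElementById("video") as HTMLVideoElement;
//
// const onFrame = async () => {
//   // detectForVideo expects a monotonically increasing timestamp in milliseconds
//   const result = await identifyFrame(video, performance.now());
//   for (const detection of result.detections) {
//     const category = detection.categories[0];
//     console.log(category?.categoryName, category?.score, detection.boundingBox);
//   }
//   video.requestVideoFrameCallback(onFrame);
// };
//
// video.requestVideoFrameCallback(onFrame);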