phuochungus commited on
Commit
4986f6d
0 Parent(s):

fix broken git history

Browse files
.dockerignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ **/test.*
2
+ **/.vscode
3
+ **/__pycache__
4
+ **/.env
5
+ app/firebase_config.json
6
+ demo
7
+ **/*.mp4
8
+ **/*.jpg
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.ot filter=lfs diff=lfs merge=lfs -text
17
+ *.parquet filter=lfs diff=lfs merge=lfs -text
18
+ *.pb filter=lfs diff=lfs merge=lfs -text
19
+ *.pickle filter=lfs diff=lfs merge=lfs -text
20
+ *.pkl filter=lfs diff=lfs merge=lfs -text
21
+ *.pt filter=lfs diff=lfs merge=lfs -text
22
+ *.pth filter=lfs diff=lfs merge=lfs -text
23
+ *.rar filter=lfs diff=lfs merge=lfs -text
24
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
25
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
26
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ *.onnx filter=lfs diff=lfs merge=lfs -text
.github/workflows/deploy.yml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Deploy On Hugging Face
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+ workflow_dispatch:
8
+
9
+ jobs:
10
+ deploy:
11
+ runs-on: ubuntu-latest
12
+ steps:
13
+ - uses: actions/checkout@v3
14
+ with:
15
+ fetch-depth: 0
16
+ lfs: true
17
+ - name: Push to hub
18
+ env:
19
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
20
+ run: git push https://phuochungus:$HF_TOKEN@huggingface.co/spaces/phuochungus/RTMDet_PRODUCTION main
.gitignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ **/test.*
2
+ **/.vscode
3
+ **/__pycache__
4
+ **/.env
5
+ app/firebase_config.json
6
+ demo
7
+ **/*.mp4
8
+ **/*.jpg
Dockerfile ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.8
2
+
3
+ #CREATE A USER
4
+ RUN useradd -m -u 1000 user
5
+
6
+ RUN apt-get update && apt-get install -y libgl1 ffmpeg
7
+
8
+ USER user
9
+
10
+ ENV HOME=/home/user \
11
+ PATH=/home/user/.local/bin:$PATH
12
+
13
+ RUN pip install --no-cache-dir --upgrade pip
14
+
15
+ #COPY SOURCE CODE
16
+ WORKDIR ${HOME}/app
17
+
18
+ COPY --chown=user . .
19
+
20
+ RUN pip install -r ./app/requirements.txt
21
+
22
+ COPY ./libs/image.py /home/user/.local/lib/python3.8/site-packages/mmcv/visualization/image.py
23
+
24
+ EXPOSE 3000
25
+
26
+ CMD [ "uvicorn", "app.main:app", "--host" ,"0.0.0.0" ,"--port", "3000"]
README.md ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Rtmdet
3
+ emoji: 🐠
4
+ colorFrom: yellow
5
+ colorTo: pink
6
+ sdk: docker
7
+ pinned: false
8
+ app_port: 3000
9
+ ---
10
+
11
+ HOW TO RUN:
12
+ Open the compose.yaml file and manually change the environment variables, then run:
13
+ ```bash
14
+ docker compose up -d
15
+ ```
17
+
18
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app/__init__.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from dotenv import load_dotenv
from mmdeploy_runtime import Detector
from supabase import create_client, Client
from firebase_admin import credentials, initialize_app
import json

import logging

# Reuse uvicorn's logger so application messages share the server's
# handlers and formatting.
logger = logging.getLogger("uvicorn")
logger.setLevel(logging.INFO)


# Pull SUPABASE_URL / SUPABASE_KEY / FIREBASE_CREDENTIALS from a local .env.
load_dotenv()

# CPU-only RTMDet detector loaded from the bundled mmdeploy model directory
# (see model/deploy.json: onnxruntime backend, end2end.onnx).
model_path = "./model"
detector = Detector(model_path=model_path, device_name="cpu", device_id=0)

# Supabase client used by the video router to upload results.
# NOTE(review): os.environ.get returns None when a variable is unset, and
# create_client would then fail at import time — confirm these are always set.
url: str = os.environ.get("SUPABASE_URL")
key: str = os.environ.get("SUPABASE_KEY")
supabase: Client = create_client(url, key)


# Firebase Admin app initialised from a service-account JSON blob stored
# directly in the FIREBASE_CREDENTIALS environment variable.
firebase_app = initialize_app(
    credential=credentials.Certificate(
        json.loads(os.environ.get("FIREBASE_CREDENTIALS"))
    )
)
app/constants.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Per-class drawing colours: one integer triple per class id, indexed by the
# detection label in app/routers/image.py. 83 entries — enough to cover the
# 80 classes in classNames below; some colours repeat.
# NOTE(review): channel order (RGB vs BGR as consumed by cv2/mmcv) is not
# established here — confirm against mmcv.color_val usage.
colors = [
    (255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255),
    (0, 255, 255), (128, 0, 0), (0, 128, 0), (0, 0, 128), (128, 128, 0),
    (128, 0, 128), (0, 128, 128), (165, 42, 42), (210, 105, 30), (218, 165, 32),
    (139, 69, 19), (244, 164, 96), (255, 99, 71), (255, 127, 80), (255, 69, 0),
    (255, 140, 0), (255, 215, 0), (218, 112, 214), (255, 182, 193), (255, 192, 203),
    (255, 20, 147), (199, 21, 133), (255, 105, 180), (255, 0, 255), (255, 250, 205),
    (250, 128, 114), (255, 99, 71), (255, 69, 0), (255, 140, 0), (255, 215, 0),
    (255, 223, 0), (255, 182, 193), (255, 192, 203), (255, 20, 147), (199, 21, 133),
    (255, 105, 180), (255, 0, 255), (255, 250, 205), (250, 128, 114), (255, 99, 71),
    (255, 69, 0), (255, 140, 0), (255, 215, 0), (173, 255, 47), (154, 205, 50),
    (85, 107, 47), (144, 238, 144), (0, 128, 0), (0, 255, 0), (50, 205, 50),
    (0, 250, 154), (0, 255, 127), (0, 128, 128), (0, 139, 139), (0, 206, 209),
    (70, 130, 180), (100, 149, 237), (0, 0, 128), (0, 0, 139), (0, 0, 205),
    (0, 0, 255), (0, 191, 255), (30, 144, 255), (135, 206, 235), (173, 216, 230),
    (175, 238, 238), (240, 248, 255), (240, 255, 240), (245, 245, 220), (255, 228, 196),
    (255, 235, 205), (255, 239, 219), (255, 245, 238), (245, 222, 179), (255, 248, 220),
    (255, 250, 240), (250, 250, 210), (253, 245, 230),
]
86
+
87
# The 80 object class names recognised by the detector (the standard COCO-80
# label set). Detection label ids index into this list when drawing results.
classNames = [
    "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
    "truck", "boat", "traffic light", "fire hydrant", "stop sign",
    "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
    "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag",
    "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
    "baseball bat", "baseball glove", "skateboard", "surfboard",
    "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon",
    "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot",
    "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant",
    "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote",
    "keyboard", "cell phone", "microwave", "oven", "toaster", "sink",
    "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
    "hair drier", "toothbrush",
]
app/dependencies.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import Depends, HTTPException, status
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from jwt import InvalidTokenError
from app import logger
from firebase_admin import auth

# Extracts the bearer token from the Authorization header for us.
security = HTTPBearer()


async def get_current_user(
    credentials: HTTPAuthorizationCredentials = Depends(security),
):
    """Validate the Firebase ID token supplied as a bearer token.

    Returns:
        The decoded token payload (Firebase claims dict) on success.

    Raises:
        HTTPException: 401 when the token is missing, malformed or invalid.
    """
    credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )
    try:
        payload = auth.verify_id_token(credentials.credentials)
    except (InvalidTokenError, ValueError) as e:
        # Expected verification failures: bad signature, expired, malformed.
        logger.info(e)
        raise credentials_exception from e
    except Exception as e:
        # Defensive catch-all so an unexpected verifier error surfaces as a
        # 401 rather than an unhandled 500.
        logger.info(e)
        raise credentials_exception from e

    return payload
app/main.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Any

from typing_extensions import Annotated
from fastapi import Depends, FastAPI

from app.dependencies import get_current_user
from .routers import image, video


app = FastAPI()

app.include_router(image.router)
app.include_router(video.router)


@app.get("/me")
def getProfile(current_user: Annotated[Any, Depends(get_current_user)]):
    """Return the decoded Firebase token payload of the caller."""
    # Fixed: the annotation previously used the builtin function `any`
    # instead of the type `typing.Any`.
    return current_user


@app.post("/login")
def login(email: str, password: str):
    # Placeholder: clients currently authenticate directly against Firebase.
    return "not implemented yet"


@app.get("/")
def hello():
    """Liveness probe."""
    return "Hello World!"
app/requirements.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiofiles==23.2.1
2
+ fastapi==0.103.1
3
+ mmcv==2.0.1
4
+ mmdeploy_runtime==1.2.0
5
+ numpy==1.24.4
6
+ opencv_python==4.8.0.76
7
+ python-dotenv==1.0.0
8
+ Requests==2.31.0
9
+ supabase==1.0.4
10
+ uvicorn==0.23.2
11
+ python-multipart==0.0.6
12
+ firebase_admin==6.2.0
app/routers/__init__.py ADDED
File without changes
app/routers/image.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, File, Response, WebSocket, WebSocketDisconnect
2
+ import mmcv
3
+ import cv2
4
+ import numpy as np
5
+ from app.constants import classNames, colors
6
+ from app import detector
7
+
8
+
9
+ router = APIRouter(prefix="/image")
10
+
11
+
12
@router.post("")
async def handleImageRequest(
    file: bytes = File(...),
    threshold: float = 0.3,
    raw: bool = False,
):
    """Detect objects in an uploaded image.

    When ``raw`` is true, respond with JSON bboxes/labels; otherwise respond
    with a JPEG that has the detections drawn on it.
    """
    decoded = mmcv.imfrombytes(file, cv2.IMREAD_COLOR)

    if raw:
        bboxes, labels = inferenceImage(decoded, threshold, raw)
        return {"bboxes": bboxes.tolist(), "labels": labels.tolist()}

    annotated = inferenceImage(decoded, threshold, raw)
    ok, encoded = cv2.imencode(".jpg", annotated)
    if not ok:
        return Response(content="Failed to encode image", status_code=500)

    return Response(content=encoded.tobytes(), media_type="image/jpeg")
31
+
32
+
33
def inferenceImage(img, threshold: float, isRaw: bool = False):
    """Run the detector on ``img``.

    Args:
        img: Decoded image (as produced by mmcv.imfrombytes / cv2).
        threshold: Minimum confidence for a detection to be kept/drawn.
        isRaw: When true, return ``(bboxes, labels)`` arrays filtered by
            ``threshold``; when false, return the image with detections
            drawn on it (mmcv applies the same score threshold).
    """
    bboxes, labels, _ = detector(img)
    if isRaw:
        # Vectorized filter (replaces an index-collect loop + np.delete):
        # column 4 of each bbox row is the confidence score.
        keep = bboxes[:, 4] >= threshold
        return bboxes[keep], labels[keep]
    return mmcv.imshow_det_bboxes(
        img=img,
        bboxes=bboxes,
        labels=labels,
        class_names=classNames,
        show=False,
        colors=colors,
        score_thr=threshold,
    )
54
+
55
+
56
@router.websocket("/")
async def websocketEndpoint(websocket: WebSocket, threshold: float = 0.3):
    """Streaming detection: receive image bytes, answer with JSON results."""
    await websocket.accept()
    try:
        while True:
            frame_bytes = await websocket.receive_bytes()
            frame = mmcv.imfrombytes(frame_bytes, cv2.IMREAD_COLOR)
            bboxes, labels = inferenceImage(frame, threshold, True)
            payload = {"bboxes": bboxes.tolist(), "labels": labels.tolist()}
            await websocket.send_json(payload)
    except WebSocketDisconnect:
        # Client disconnected; nothing to clean up.
        pass
app/routers/video.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import json
3
+ from multiprocessing import Process
4
+ import os
5
+ import re
6
+ import shutil
7
+ import time
8
+ import aiofiles
9
+ import cv2
10
+ from fastapi import (
11
+ APIRouter,
12
+ Depends,
13
+ HTTPException,
14
+ UploadFile,
15
+ BackgroundTasks,
16
+ status,
17
+ )
18
+ import requests
19
+ from app import supabase
20
+ from app.dependencies import get_current_user
21
+ from app.routers.image import inferenceImage
22
+
23
+ router = APIRouter(prefix="/video")
24
+
25
+
26
@router.post("/{artifactId}")
async def handleVideoRequest(
    artifactId: str,
    file: UploadFile,
    background_tasks: BackgroundTasks,
    threshold: float = 0.3,
    user=Depends(get_current_user),
):
    """Accept a video upload and schedule background inference on it.

    The upload is streamed to ``<job_id>/input.mp4`` and ``inferenceVideo``
    is queued as a background task. Returns the output object name
    (``<job_id>.mp4``) the processed video will be stored under.

    Raises:
        HTTPException: 400 when the upload is not a video.
    """
    # Raw string fixes the invalid "\/" escape in the original pattern.
    if re.search(r"^video/", file.content_type) is None:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="File must be video",
        )

    # Millisecond timestamp doubles as the working-directory / job name
    # (renamed from `id`, which shadowed the builtin).
    job_id = str(now())
    try:
        os.mkdir(job_id)
        async with aiofiles.open(os.path.join(job_id, "input.mp4"), "wb") as out_file:
            # Stream to disk in small chunks to bound memory usage.
            while content := await file.read(1024):
                await out_file.write(content)
        background_tasks.add_task(inferenceVideo, artifactId, job_id, threshold)
        return job_id + ".mp4"
    except ValueError as err:
        print(err)
        print("Error processing video")
        shutil.rmtree(job_id)
52
+
53
+
54
def now():
    """Return the current Unix timestamp in whole milliseconds."""
    millis = time.time() * 1000
    return round(millis)
56
+
57
+
58
async def inferenceVideo(artifactId: str, inputDir: str, threshold: float):
    """Annotate ``<inputDir>/input.mp4``, upload video + thumbnail, report status.

    Reads the input video frame by frame, draws detections with
    ``inferenceImage``, writes ``out.mp4`` and a 160x160 ``thumbnail.jpg``,
    uploads both to Supabase storage, then PATCHes the artifact record with
    the public URLs. The working directory is always removed at the end.

    Note: runs as a FastAPI background task; the frame loop is synchronous
    and will block the event loop while it runs.
    """
    try:
        # Bug fix: the original wrapped this in multiprocessing.Process but
        # passed the *result* of updateArtifact() as the target (the call ran
        # synchronously and an empty Process was started). A plain call has
        # the intended effect.
        updateArtifact(artifactId, {"status": "processing"})
        cap = cv2.VideoCapture(
            filename=os.path.join(inputDir, "input.mp4"), apiPreference=cv2.CAP_FFMPEG
        )
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (
            int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        )
        result = cv2.VideoWriter(
            filename=os.path.join(inputDir, "out.mp4"),
            fourcc=cv2.VideoWriter_fourcc(*"mp4v"),
            fps=fps,
            frameSize=size,
        )

        thumbnail = None
        while cap.isOpened():
            res, frame = cap.read()
            if not res:
                break
            if thumbnail is None:
                # Bug fix: capture the thumbnail only from a *successfully*
                # decoded frame (the original grabbed it before checking res).
                thumbnail = frame
            result.write(inferenceImage(frame, threshold))

        cap.release()
        result.release()

        def createThumbnail(thumbnail):
            # 160x160 preview saved next to the output video.
            thumbnail = cv2.resize(
                src=thumbnail, dsize=(160, 160), interpolation=cv2.INTER_AREA
            )
            cv2.imwrite(os.path.join(inputDir, "thumbnail.jpg"), thumbnail)

        createThumbnail(thumbnail)

        async def uploadVideo():
            async with aiofiles.open(os.path.join(inputDir, "out.mp4"), "rb") as f:
                supabase.storage.from_("video").upload(
                    inputDir + ".mp4", await f.read(), {"content-type": "video/mp4"}
                )

        async def uploadThumbnail():
            async with aiofiles.open(
                os.path.join(inputDir, "thumbnail.jpg"), "rb"
            ) as f:
                supabase.storage.from_("thumbnail").upload(
                    inputDir + ".jpg", await f.read(), {"content-type": "image/jpeg"}
                )

        try:
            n = now()
            await asyncio.gather(uploadVideo(), uploadThumbnail())
            print(now() - n)  # crude upload-duration log, in ms
        except Exception as e:
            # Upload failure is logged but the artifact is still marked
            # success below — preserving the original control flow.
            print(e)

        updateArtifact(
            artifactId,
            {
                "status": "success",
                "path": "https://hdfxssmjuydwfwarxnfe.supabase.co/storage/v1/object/public/video/"
                + inputDir
                + ".mp4",
                "thumbnailURL": "https://hdfxssmjuydwfwarxnfe.supabase.co/storage/v1/object/public/thumbnail/"
                + inputDir
                + ".jpg",
            },
        )
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit pass
        # through. Any processing failure marks the artifact as failed
        # (same Process-misuse fix as above).
        updateArtifact(artifactId, {"status": "fail"})
    finally:
        shutil.rmtree(inputDir)
145
+
146
+
147
def updateArtifact(artifactId: str, body):
    """PATCH the remote artifact record with ``body`` serialized as JSON."""
    endpoint = "https://firebasetot.onrender.com/artifacts/" + artifactId
    requests.request(
        "PATCH",
        endpoint,
        headers={"Content-Type": "application/json"},
        data=json.dumps(body),
    )
compose.yaml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ services:
2
+ app:
3
+ build: .
4
+ image: phuochungus/rtmdet
5
+ ports:
6
+ - "3000:3000"
7
+ environment:
8
+ - SUPABASE_URL=your-supabase-url
9
+ - SUPABASE_KEY=your-supabase-key
10
+ - FIREBASE_CREDENTIALS=your-firebase-credentials
libs/image.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from typing import List, Optional, Union
3
+
4
+ import cv2
5
+ import numpy as np
6
+
7
+ from mmcv.image import imread, imwrite
8
+ from .color import Color, color_val
9
+
10
+ # a type alias declares the optional types of color argument
11
+ ColorType = Union[Color, str, tuple, int, np.ndarray]
12
+
13
+
14
def imshow(img: Union[str, np.ndarray], win_name: str = "", wait_time: int = 0):
    """Show an image.

    Args:
        img (str or ndarray): The image to be displayed.
        win_name (str): The window name.
        wait_time (int): Value of waitKey param.
    """
    cv2.imshow(win_name, imread(img))
    if wait_time != 0:
        cv2.waitKey(wait_time)
        return
    # wait_time == 0: poll in 1 ms steps so a closed window is detected
    # instead of blocking forever inside cv2.waitKey(0).
    while True:
        key = cv2.waitKey(1)
        window_closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1
        # Stop when the user closed the window or pressed any key.
        if window_closed or key != -1:
            break
33
+
34
+
35
def imshow_bboxes(
    img: Union[str, np.ndarray],
    bboxes: Union[list, np.ndarray],
    colors: ColorType = "green",
    top_k: int = -1,
    thickness: int = 1,
    show: bool = True,
    win_name: str = "",
    wait_time: int = 0,
    out_file: Optional[str] = None,
):
    """Draw bboxes on an image.

    Args:
        img (str or ndarray): The image to be displayed.
        bboxes (list or ndarray): A list of ndarray of shape (k, 4).
        colors (Color or str or tuple or int or ndarray): A list of colors.
        top_k (int): Plot the first k bboxes only if set positive.
        thickness (int): Thickness of lines.
        show (bool): Whether to show the image.
        win_name (str): The window name.
        wait_time (int): Value of waitKey param.
        out_file (str, optional): The filename to write the image.

    Returns:
        ndarray: The image with bboxes drawn on it.
    """
    img = imread(img)
    # cv2 drawing functions require a contiguous array.
    img = np.ascontiguousarray(img)

    # Normalise inputs: a single ndarray becomes a one-group list, and a
    # single colour is replicated so each group gets its own colour.
    if isinstance(bboxes, np.ndarray):
        bboxes = [bboxes]
    if not isinstance(colors, list):
        colors = [colors for _ in range(len(bboxes))]
    colors = [color_val(c) for c in colors]
    assert len(bboxes) == len(colors)

    for i, _bboxes in enumerate(bboxes):
        _bboxes = _bboxes.astype(np.int32)
        # Draw at most top_k boxes of this group (all when top_k <= 0).
        if top_k <= 0:
            _top_k = _bboxes.shape[0]
        else:
            _top_k = min(top_k, _bboxes.shape[0])
        for j in range(_top_k):
            left_top = (_bboxes[j, 0], _bboxes[j, 1])
            right_bottom = (_bboxes[j, 2], _bboxes[j, 3])
            cv2.rectangle(img, left_top, right_bottom, colors[i], thickness=thickness)

    if show:
        imshow(img, win_name, wait_time)
    if out_file is not None:
        imwrite(img, out_file)
    return img
88
+
89
+
90
def imshow_det_bboxes(
    img: Union[str, np.ndarray],
    bboxes: np.ndarray,
    labels: np.ndarray,
    class_names: List[str] = None,
    score_thr: float = 0,
    bbox_color: ColorType = "green",
    text_color: ColorType = "green",
    thickness: int = 1,
    font_scale: float = 1,
    show: bool = True,
    win_name: str = "",
    wait_time: int = 0,
    out_file: Optional[str] = None,
    colors: np.ndarray = None,
):
    """Draw bboxes and class labels (with scores) on an image.

    This is a patched copy of mmcv's visualization helper; the extra
    ``colors`` parameter gives each class its own bbox/label colour.

    Args:
        img (str or ndarray): The image to be displayed.
        bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
            (n, 5).
        labels (ndarray): Labels of bboxes.
        class_names (list[str]): Names of each classes.
        score_thr (float): Minimum score of bboxes to be shown.
        bbox_color (Color or str or tuple or int or ndarray): Color
            of bbox lines.
        text_color (Color or str or tuple or int or ndarray): Color
            of texts.
        thickness (int): Thickness of lines.
        font_scale (float): Font scales of texts.
        show (bool): Whether to show the image.
        win_name (str): The window name.
        wait_time (int): Value of waitKey param.
        out_file (str or None): The filename to write the image.
        colors (array of tuple RGB int): the color of bbox and label of each
            class; when given it overrides bbox_color/text_color per label.

    Returns:
        ndarray: The image with bboxes drawn on it.
    """
    assert bboxes.ndim == 2
    assert labels.ndim == 1
    assert bboxes.shape[0] == labels.shape[0]
    assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
    img = imread(img)

    # Filter detections below score_thr (requires the score column).
    if score_thr > 0:
        assert bboxes.shape[1] == 5
        scores = bboxes[:, -1]
        inds = scores > score_thr
        bboxes = bboxes[inds, :]
        labels = labels[inds]

    bbox_color = color_val(bbox_color)
    text_color = color_val(text_color)

    for bbox, label in zip(bboxes, labels):
        bbox_int = bbox.astype(np.int32)
        left_top = (bbox_int[0], bbox_int[1])
        right_bottom = (bbox_int[2], bbox_int[3])
        # Per-class colour lookup overrides the uniform defaults.
        if colors is not None:
            bbox_color = text_color = color_val(colors[label])
        cv2.rectangle(img, left_top, right_bottom, bbox_color, thickness=thickness)
        label_text = class_names[label] if class_names is not None else f"cls {label}"
        # A 5th column is the confidence score; append it to the label.
        if len(bbox) > 4:
            label_text += f"|{bbox[-1]:.02f}"
        cv2.putText(
            img,
            label_text,
            (bbox_int[0], bbox_int[1] - 2),  # just above the box's top-left
            cv2.FONT_HERSHEY_TRIPLEX,
            font_scale,
            text_color,
            4  # text thickness
        )

    if show:
        imshow(img, win_name, wait_time)
    if out_file is not None:
        imwrite(img, out_file)
    return img
model/deploy.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.2.0",
3
+ "task": "Detector",
4
+ "models": [
5
+ {
6
+ "name": "rtmdet",
7
+ "net": "end2end.onnx",
8
+ "weights": "",
9
+ "backend": "onnxruntime",
10
+ "precision": "FP32",
11
+ "batch_size": 1,
12
+ "dynamic_shape": true
13
+ }
14
+ ],
15
+ "customs": []
16
+ }
model/detail.json ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.2.0",
3
+ "codebase": {
4
+ "task": "ObjectDetection",
5
+ "codebase": "mmdet",
6
+ "version": "3.1.0",
7
+ "pth": "rtmdet_tiny_8xb32-300e_coco_20220902_112414-78e30dcc.pth",
8
+ "config": "rtmdet_tiny_8xb32-300e_coco.py"
9
+ },
10
+ "codebase_config": {
11
+ "type": "mmdet",
12
+ "task": "ObjectDetection",
13
+ "model_type": "end2end",
14
+ "post_processing": {
15
+ "score_threshold": 0.05,
16
+ "confidence_threshold": 0.005,
17
+ "iou_threshold": 0.5,
18
+ "max_output_boxes_per_class": 200,
19
+ "pre_top_k": 5000,
20
+ "keep_top_k": 100,
21
+ "background_label_id": -1
22
+ }
23
+ },
24
+ "onnx_config": {
25
+ "type": "onnx",
26
+ "export_params": true,
27
+ "keep_initializers_as_inputs": false,
28
+ "opset_version": 11,
29
+ "save_file": "end2end.onnx",
30
+ "input_names": [
31
+ "input"
32
+ ],
33
+ "output_names": [
34
+ "dets",
35
+ "labels"
36
+ ],
37
+ "input_shape": null,
38
+ "optimize": true,
39
+ "dynamic_axes": {
40
+ "input": {
41
+ "0": "batch",
42
+ "2": "height",
43
+ "3": "width"
44
+ },
45
+ "dets": {
46
+ "0": "batch",
47
+ "1": "num_dets"
48
+ },
49
+ "labels": {
50
+ "0": "batch",
51
+ "1": "num_dets"
52
+ }
53
+ }
54
+ },
55
+ "backend_config": {
56
+ "type": "onnxruntime",
57
+ "precision": "fp16",
58
+ "common_config": {
59
+ "min_positive_val": 1e-07,
60
+ "max_finite_val": 10000.0,
61
+ "keep_io_types": false,
62
+ "disable_shape_infer": false,
63
+ "op_block_list": null,
64
+ "node_block_list": null
65
+ }
66
+ },
67
+ "calib_config": {}
68
+ }
model/end2end.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e0efff65be17a13541a3b93d6b0cde60ed37f996c4f2d57aa38071e48117f217
3
+ size 22245735
model/pipeline.json ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "pipeline": {
3
+ "input": [
4
+ "img"
5
+ ],
6
+ "output": [
7
+ "post_output"
8
+ ],
9
+ "tasks": [
10
+ {
11
+ "type": "Task",
12
+ "module": "Transform",
13
+ "name": "Preprocess",
14
+ "input": [
15
+ "img"
16
+ ],
17
+ "output": [
18
+ "prep_output"
19
+ ],
20
+ "transforms": [
21
+ {
22
+ "type": "LoadImageFromFile",
23
+ "backend_args": null
24
+ },
25
+ {
26
+ "type": "Resize",
27
+ "keep_ratio": true,
28
+ "size": [
29
+ 640,
30
+ 640
31
+ ]
32
+ },
33
+ {
34
+ "type": "Pad",
35
+ "size": [
36
+ 640,
37
+ 640
38
+ ],
39
+ "pad_val": {
40
+ "img": [
41
+ 114,
42
+ 114,
43
+ 114
44
+ ]
45
+ }
46
+ },
47
+ {
48
+ "type": "Normalize",
49
+ "to_rgb": false,
50
+ "mean": [
51
+ 103.53,
52
+ 116.28,
53
+ 123.675
54
+ ],
55
+ "std": [
56
+ 57.375,
57
+ 57.12,
58
+ 58.395
59
+ ]
60
+ },
61
+ {
62
+ "type": "Pad",
63
+ "size_divisor": 1
64
+ },
65
+ {
66
+ "type": "DefaultFormatBundle"
67
+ },
68
+ {
69
+ "type": "Collect",
70
+ "meta_keys": [
71
+ "img_path",
72
+ "flip",
73
+ "ori_filename",
74
+ "pad_param",
75
+ "pad_shape",
76
+ "scale_factor",
77
+ "valid_ratio",
78
+ "flip_direction",
79
+ "img_norm_cfg",
80
+ "img_id",
81
+ "img_shape",
82
+ "ori_shape",
83
+ "filename"
84
+ ],
85
+ "keys": [
86
+ "img"
87
+ ]
88
+ }
89
+ ]
90
+ },
91
+ {
92
+ "name": "rtmdet",
93
+ "type": "Task",
94
+ "module": "Net",
95
+ "is_batched": true,
96
+ "input": [
97
+ "prep_output"
98
+ ],
99
+ "output": [
100
+ "infer_output"
101
+ ],
102
+ "input_map": {
103
+ "img": "input"
104
+ },
105
+ "output_map": {}
106
+ },
107
+ {
108
+ "type": "Task",
109
+ "module": "mmdet",
110
+ "name": "postprocess",
111
+ "component": "ResizeBBox",
112
+ "params": {
113
+ "nms_pre": 1000,
114
+ "min_bbox_size": 0,
115
+ "score_thr": 0.3,
116
+ "nms": {
117
+ "type": "nms",
118
+ "iou_threshold": 0.65
119
+ },
120
+ "max_per_img": 100
121
+ },
122
+ "output": [
123
+ "post_output"
124
+ ],
125
+ "input": [
126
+ "prep_output",
127
+ "infer_output"
128
+ ]
129
+ }
130
+ ]
131
+ }
132
+ }