Alex Chan committed on
Commit 999c5c9
1 parent: e424ddb

initial commit

Files changed (40)
  1. .gitignore +1 -0
  2. Code/Inference.py +275 -0
  3. Code/__pycache__/Inference.cpython-311.pyc +0 -0
  4. Repositories/DeepLabCut-live/.gitignore +139 -0
  5. Repositories/DeepLabCut-live/CITATION.cff +55 -0
  6. Repositories/DeepLabCut-live/LICENSE +666 -0
  7. Repositories/DeepLabCut-live/MANIFEST.in +1 -0
  8. Repositories/DeepLabCut-live/README.md +153 -0
  9. Repositories/DeepLabCut-live/benchmarking/run_dlclive_benchmark.py +43 -0
  10. Repositories/DeepLabCut-live/dlclive/__init__.py +11 -0
  11. Repositories/DeepLabCut-live/dlclive/benchmark.py +726 -0
  12. Repositories/DeepLabCut-live/dlclive/check_install/check_install.py +88 -0
  13. Repositories/DeepLabCut-live/dlclive/display.py +117 -0
  14. Repositories/DeepLabCut-live/dlclive/dlclive.py +480 -0
  15. Repositories/DeepLabCut-live/dlclive/exceptions.py +18 -0
  16. Repositories/DeepLabCut-live/dlclive/graph.py +138 -0
  17. Repositories/DeepLabCut-live/dlclive/pose.py +120 -0
  18. Repositories/DeepLabCut-live/dlclive/processor/README.md +21 -0
  19. Repositories/DeepLabCut-live/dlclive/processor/__init__.py +9 -0
  20. Repositories/DeepLabCut-live/dlclive/processor/kalmanfilter.py +144 -0
  21. Repositories/DeepLabCut-live/dlclive/processor/processor.py +23 -0
  22. Repositories/DeepLabCut-live/dlclive/utils.py +218 -0
  23. Repositories/DeepLabCut-live/dlclive/version.py +11 -0
  24. Repositories/DeepLabCut-live/docs/install_desktop.md +29 -0
  25. Repositories/DeepLabCut-live/docs/install_jetson.md +81 -0
  26. Repositories/DeepLabCut-live/example_processors/DogJumpLED/__init__.py +9 -0
  27. Repositories/DeepLabCut-live/example_processors/DogJumpLED/izzy_jump.py +143 -0
  28. Repositories/DeepLabCut-live/example_processors/DogJumpLED/izzy_jump_offline.py +123 -0
  29. Repositories/DeepLabCut-live/example_processors/DogJumpLED/teensy_leds/teensy_leds.ino +49 -0
  30. Repositories/DeepLabCut-live/example_processors/MouseLickLED/__init__.py +8 -0
  31. Repositories/DeepLabCut-live/example_processors/MouseLickLED/lick_led.py +85 -0
  32. Repositories/DeepLabCut-live/example_processors/MouseLickLED/teensy_leds/teensy_leds.ino +49 -0
  33. Repositories/DeepLabCut-live/example_processors/TeensyLaser/__init__.py +8 -0
  34. Repositories/DeepLabCut-live/example_processors/TeensyLaser/teensy_laser.py +86 -0
  35. Repositories/DeepLabCut-live/example_processors/TeensyLaser/teensy_laser/teensy_laser.ino +77 -0
  36. Repositories/DeepLabCut-live/poetry.lock +0 -0
  37. Repositories/DeepLabCut-live/pyproject.toml +46 -0
  38. Repositories/DeepLabCut-live/reinstall.sh +4 -0
  39. app.py +46 -0
  40. requirements.txt +8 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ Weights/
Code/Inference.py ADDED
@@ -0,0 +1,275 @@
+ """
+ inference on single image for MaskRCNN (FROM DETECTRON) + DLC
+
+ two step, pretrained MaskRCNN, then DLC
+
+ """
+ import cv2
+ import torch
+
+ import sys
+ sys.path.append("Repositories/DeepLabCut-live")
+
+ import deeplabcut as dlc
+ from dlclive import DLCLive, Processor
+
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from tqdm import tqdm
+ import os
+ import shutil
+
+ import torchvision
+ from torchvision.transforms import transforms as transforms
+
+ import pickle
+
+ import detectron2
+
+ # import some common detectron2 utilities
+ from detectron2 import model_zoo
+ from detectron2.engine import DefaultPredictor
+ from detectron2.config import get_cfg
+ from detectron2.utils.visualizer import Visualizer
+ from detectron2.data import MetadataCatalog, DatasetCatalog
+
+
+ COCO_INSTANCE_CATEGORY_NAMES = [
+     '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+     'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
+     'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+     'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
+     'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
+     'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
+     'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+     'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
+     'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
+     'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
+     'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
+     'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
+ ]
+
+
+ def Process_Crop(Crop, CropSize):
+     """Pad the crop up to CropSize; if the crop is larger than CropSize, scale it down first."""
+     # import ipdb;ipdb.set_trace()
+     if Crop.shape[0] > CropSize[0] or Crop.shape[1] > CropSize[1]:  # Crop is bigger, scale down
+         ScaleProportion = min(CropSize[0]/Crop.shape[0], CropSize[1]/Crop.shape[1])
+
+         width_scaled = int(Crop.shape[1] * ScaleProportion)
+         height_scaled = int(Crop.shape[0] * ScaleProportion)
+         Crop = cv2.resize(Crop, (width_scaled, height_scaled), interpolation=cv2.INTER_LINEAR)  # resize image
+
+         # Points2D = {k:[v[0]*ScaleProportion,v[1]*ScaleProportion] for k,v in Points2D.items()}
+     else:
+         ScaleProportion = 1
+
+     ## Padding:
+     if Crop.shape[0] % 2 == 0:
+         # shape is an even number
+         YPadTop = int((CropSize[1] - Crop.shape[0])/2)
+         YPadBot = int((CropSize[1] - Crop.shape[0])/2)
+     else:
+         YPadTop = int(((CropSize[1] - Crop.shape[0])/2)-0.5)
+         YPadBot = int(((CropSize[1] - Crop.shape[0])/2)+0.5)
+     if Crop.shape[1] % 2 == 0:
+         # shape is an even number
+         XPadLeft = int((CropSize[0] - Crop.shape[1])/2)
+         XPadRight = int((CropSize[0] - Crop.shape[1])/2)
+     else:
+         XPadLeft = int(((CropSize[0] - Crop.shape[1])/2)-0.5)
+         XPadRight = int(((CropSize[0] - Crop.shape[1])/2)+0.5)
+
+     OutImage = cv2.copyMakeBorder(Crop, YPadTop, YPadBot, XPadLeft, XPadRight, cv2.BORDER_CONSTANT, value=[0, 0, 0])
+
+     return OutImage, ScaleProportion, YPadTop, XPadLeft
+
+
+ def DLCInference(Crop, dlc_liveObj, CropSize):
+     """Inference for DLC"""
+
+     ### Scale crop if image is bigger than cropsize
+     # import ipdb;ipdb.set_trace()
+     if Crop.shape[0] > CropSize[0] or Crop.shape[1] > CropSize[1]:  # image bigger than crop size, scale down
+         ScaleRatio = min([CropSize[0]/Crop.shape[0], CropSize[1]/Crop.shape[1]])
+         ScaleWidth = round(Crop.shape[1] * ScaleRatio)
+         ScaleHeight = round(Crop.shape[0] * ScaleRatio)
+         resizedCrop = cv2.resize(Crop, (ScaleWidth, ScaleHeight), interpolation=cv2.INTER_LINEAR)  # resize image
+         ScaleUpRatio = 1/ScaleRatio  # ratio to scale keypoints back up to original
+         # import ipdb;ipdb.set_trace()
+     else:
+         resizedCrop = Crop
+         ScaleUpRatio = 1
+     # cv2.imwrite(filename="tempresize.jpg", img=resizedCrop)
+     # cv2.imwrite(filename="temp.jpg", img=Crop)
+     if dlc_liveObj.sess is None:  # if first time, init
+         DLCPredict2D = dlc_liveObj.init_inference(resizedCrop)
+
+     DLCPredict2D = dlc_liveObj.get_pose(resizedCrop)
+     DLCPredict2D[:, 0] = DLCPredict2D[:, 0]*ScaleUpRatio
+     DLCPredict2D[:, 1] = DLCPredict2D[:, 1]*ScaleUpRatio
+
+     return DLCPredict2D
+
+
+ def VisualizeAll(frame, box, DLCPredict2D, MeanConfidence, ScaleBBox, imsize):
+     """Visualize keypoints and bounding box on the frame"""
+     # colourList = [(255,255,0),(255,0,255),(128,0,128),(203,192,255),(0,255,255),(255,0,0),(63,133,205),(0,255,0),(0,0,255)]
+     colourList = [(0,255,255),(255,0,255),(128,0,128),(255,192,203),(255,255,0),(0,0,255),(205,133,63),(0,255,0),(255,0,0)]
+
+     ## Order: Lshoulder, Rshoulder, topKeel, botKeel, Tail, Beak, Nose, Leye, Reye
+     ## Points:
+     PlotPoints = []
+     for x, point in enumerate(DLCPredict2D):
+         roundPoint = [round(point[0]+box[0]), round(point[1]+box[1])]
+         cv2.circle(frame, roundPoint, 1, colourList[x], 5)
+         PlotPoints.append(roundPoint)
+
+     ## change box to XYWH to scale down
+     # bbox = [box[0],box[1],box[2]-box[0],box[3]-box[1]]
+     # ScaleWidth = (bbox[2]/ScaleBBox)/2
+     # ScaleHeight = (bbox[3]/ScaleBBox)/2
+     # ### based on ScaleBBox, scale back down bounding box for plotting
+     # x1 = round(bbox[0]+ScaleWidth) if round(bbox[0]+ScaleWidth) > 0 else 0
+     # y1 = round(bbox[1]+ScaleHeight) if round(bbox[1]+ScaleHeight) > 0 else 0
+     # x2 = round(bbox[0]+bbox[2]-ScaleWidth) if round(bbox[0]+bbox[2]-ScaleWidth) < imsize[0] else imsize[0]
+     # y2 = round(bbox[1]+bbox[3]-ScaleHeight) if round(bbox[1]+bbox[3]-ScaleHeight) < imsize[1] else imsize[1]
+     # box = [x1,y1,x2,y2]
+     cv2.rectangle(frame, (round(box[0]), round(box[1])), (round(box[2]), round(box[3])), [0, 0, 255], 3)
+
+     # plot mean confidence
+     # import ipdb;ipdb.set_trace()
+     # font = cv2.FONT_HERSHEY_SIMPLEX
+     # cv2.putText(frame,str(round(MeanConfidence,3)),(round(box[0]),round(box[1])),font,2,[255,0,0],2)
+
+     return frame, PlotPoints
+
+
+ def Inference(frame, predictor, dlc_liveObj, ScaleBBox=1, Dilate=5, DLCThreshold=0.3):
+     """Run Mask R-CNN detection + DLC pose estimation on a single frame and draw the results"""
+
+     InferFrame = frame.copy()
+     outputs = predictor(InferFrame)["instances"].to("cpu")
+     CropSize = (320, 320)
+
+     # import ipdb;ipdb.set_trace()
+     imsize = [frame.shape[1], frame.shape[0]]
+
+     BirdIndex = np.where(outputs.pred_classes.numpy() == 14)[0]  # 14 is the COCO class ID for bird
+     BirdBBox = outputs.pred_boxes[BirdIndex].tensor.numpy()
+     # import ipdb;ipdb.set_trace()
+     BirdMasks = (outputs.pred_masks > 0.95).numpy()[BirdIndex]
+
+     for x in range(BirdBBox.shape[0]):
+         # import ipdb;ipdb.set_trace()
+
+         bbox = list(BirdBBox[x])
+         Mask = BirdMasks[x] > 0
+         Mask = np.array(Mask, dtype=np.uint8)
+         # show_anns(frame, Mask)
+
+         if Dilate > 0:
+             DilateKernel = np.ones((Dilate, Dilate), np.uint8)
+             Mask = cv2.dilate(Mask, DilateKernel, iterations=3)
+
+         # import ipdb;ipdb.set_trace()
+         Mask = np.array(Mask, dtype=np.uint8)
+         Mask = Mask.reshape(imsize[1], imsize[0], 1)
+         Crop = cv2.bitwise_and(InferFrame, InferFrame, mask=Mask)
+
+         # cv2.imwrite(filename="temp.jpg", img=Crop)
+
+         ## change box to XYWH to scale up
+         bbox = [bbox[0], bbox[1], bbox[2]-bbox[0], bbox[3]-bbox[1]]
+         ScaleWidth = ((ScaleBBox * bbox[2])/2)-(bbox[2]/2)
+         ScaleHeight = ((ScaleBBox * bbox[3])/2)-(bbox[3]/2)
+         # import ipdb;ipdb.set_trace()
+         # BirdCrop = frame[round(bbox[1]):round(bbox[3]),round(bbox[0]):round(bbox[2])] #bbox is XYWH
+
+         x1 = round(bbox[0]-ScaleWidth) if round(bbox[0]-ScaleWidth) > 0 else 0
+         y1 = round(bbox[1]-ScaleHeight) if round(bbox[1]-ScaleHeight) > 0 else 0
+         x2 = round(bbox[0]+bbox[2]+ScaleWidth) if round(bbox[0]+bbox[2]+ScaleWidth) < imsize[0] else imsize[0]
+         y2 = round(bbox[1]+bbox[3]+ScaleHeight) if round(bbox[1]+bbox[3]+ScaleHeight) < imsize[1] else imsize[1]
+         bbox = [x1, y1, x2, y2]
+         BirdCrop = Crop[y1:y2, x1:x2]  # bbox is now XYXY
+
+         DLCPredict2D = DLCInference(BirdCrop, dlc_liveObj, CropSize)
+         MeanConfidence = DLCPredict2D[:, 2].mean()
+
+         if MeanConfidence > DLCThreshold:  # if mean keypoint confidence is higher than this threshold, consider it a bird
+             bbox.append(MeanConfidence)
+             frame, PlotPoints = VisualizeAll(frame, bbox, DLCPredict2D, MeanConfidence, ScaleBBox, imsize)
+
+     if BirdBBox.shape[0] == 0:
+         DLCPredict2D = DLCInference(InferFrame, dlc_liveObj, CropSize)
+         MeanConfidence = DLCPredict2D[:, 2].mean()
+         bbox = [0, 0, 0, 0]
+         if MeanConfidence > DLCThreshold:  # if mean keypoint confidence is higher than this threshold, consider it a bird
+             frame, PlotPoints = VisualizeAll(frame, bbox, DLCPredict2D, MeanConfidence, ScaleBBox, imsize)
+
+     return frame
+
+
+ if __name__ == "__main__":
+     # VidPath = "/media/alexchan/Extreme SSD/WorkDir/Pigeon3DTrack/ManualClickTrials/Videos/Cam1_C0008.MP4_Trimmed.mp4"
+     VidPath = "/media/alexchan/Extreme SSD/SampleDatasets/PigeonsEverywhere/2022-12-01_Pilot_bottom_control.mp4"
+
+     OutDir = "/media/alexchan/Extreme SSD/WorkDir/Pigeon3DTrack/OutdoorTracking/Final2D"
+
+     ExportModelPath = "/media/alexchan/Extreme SSD/WorkDir/Pigeon3DTrack/Weights/DLC_Weights/N6000_DLC_Mask/exported-models/DLC_DLC_Segmented_resnet_50_iteration-0_shuffle-1"
+     # ExportModelPath = "/media/alexchan/Extreme SSD/WorkDir/Pigeon3DTrack/Weights/DLC_Weights/DLC_PigeonSuperModel_imgaug_efficientnet-b0_iteration-0_shuffle-2"
+
+     if not os.path.isdir(ExportModelPath):
+         # need to export the model first
+         DLC_Config = "/media/alexchan/Extreme SSD/SampleDatasets/ImageTrainingData/N5000/DLC_Seg_Aug/config.yaml"
+         dlc.export_model(DLC_Config)
+     else:
+         print("model already exported!")
+
+     CropSize = (320, 320)
+     # WeightPath = "/home/alexchan/Documents/Pigeon3DTrack/Data/YOLO_Weights/yolov8m.pt"
+     # VidPath = "/media/alexchan/Extreme SSD/PigeonOutdoors/28032023/Cam1_C0004.MP4"
+     # import ipdb;ipdb.set_trace()
+     # YOLOModel = YOLO(YOLOPath)
+     device = "cuda"
+     # device = "cpu"
+
+     ### Detectron:
+     cfg = get_cfg()
+     # add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
+     cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml"))
+     cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set threshold for this model
+     # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
+     cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml")
+     predictor = DefaultPredictor(cfg)
+
+     ## DLC:
+     dlc_proc = Processor()
+     dlc_liveObj = DLCLive(ExportModelPath, processor=dlc_proc)
+
+     RunInference(predictor, dlc_liveObj, device, OutDir,
+                  VidPath, CropSize, startFrame=0,
+                  TotalFrames=900, ScaleBBox=1.3,
+                  DLCThreshold=0, Dilate=5)
+
+     ### DLC tests
+     # DLC_Config = "/media/alexchan/Extreme SSD/SampleDatasets/ImageTrainingData/N5000/DLC/config.yaml"
+     # Video = "/media/alexchan/Extreme SSD/SampleDatasets/ImageTrainingData/N5000/DLC/videos/Video1.mp4"
+     # dlc.analyze_videos(DLC_Config,[Video])
+     # dlc.export_model(DLC_Config)
+
+     # import pandas as pd
+     # pd.read_hdf("/media/alexchan/Extreme SSD/SampleDatasets/ImageTrainingData/N5000/DLC/videos/Video1DLC_resnet50_DLC20230401-173058shuffle1_3000.h5")
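Note: the __main__ block above calls RunInference(...), which is not defined in Inference.py or anywhere else in this commit; it presumably wraps the per-frame Inference() function in a video-reading loop. Below is a minimal sketch of such a driver, assuming only the call signature used above; the helper itself, the frame-stepping logic, and the output file naming are illustrative, not part of the commit.

# Hypothetical driver (not part of this commit): reads frames from VidPath with OpenCV,
# runs the per-frame Inference() defined above, and writes the annotated frames to OutDir.
# device and CropSize are accepted only to mirror the call in __main__; CropSize is fixed
# inside Inference() and the Detectron2/DLC models are already configured by the caller.
def RunInference(predictor, dlc_liveObj, device, OutDir, VidPath, CropSize,
                 startFrame=0, TotalFrames=900, ScaleBBox=1.0,
                 DLCThreshold=0.3, Dilate=5):
    os.makedirs(OutDir, exist_ok=True)
    cap = cv2.VideoCapture(VidPath)
    cap.set(cv2.CAP_PROP_POS_FRAMES, startFrame)  # jump to the requested start frame
    for i in tqdm(range(TotalFrames)):
        ret, frame = cap.read()
        if not ret:  # video ended before TotalFrames were read
            break
        annotated = Inference(frame, predictor, dlc_liveObj,
                              ScaleBBox=ScaleBBox, Dilate=Dilate,
                              DLCThreshold=DLCThreshold)
        cv2.imwrite(os.path.join(OutDir, "frame_%06d.jpg" % (startFrame + i)), annotated)
    cap.release()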
Code/__pycache__/Inference.cpython-311.pyc ADDED
Binary file (12.1 kB).
 
Repositories/DeepLabCut-live/.gitignore ADDED
@@ -0,0 +1,139 @@
+ # DeepLabCut-live
+
+ # Data related to benchmark!
+ benchmarking/Data*
+ benchmarking/results*
+
+ *test*
+ **DS_Store*
+ *vscode*
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ pip-wheel-metadata/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
Repositories/DeepLabCut-live/CITATION.cff ADDED
@@ -0,0 +1,55 @@
+ # This CITATION.cff file was generated with cffinit.
+ # Visit https://bit.ly/cffinit to generate yours today!
+
+ cff-version: 1.2.0
+ title: >-
+   Real-time, low-latency closed-loop feedback using
+   markerless posture tracking
+ message: >-
+   If you utilize our tool, please [cite Kane et al,
+   eLife
+   2020](https://elifesciences.org/articles/61909).
+   The preprint is available here:
+   https://www.biorxiv.org/content/10.1101/2020.08.04.236422v2
+ type: article
+ authors:
+   - given-names: Gary
+     name-particle: A
+     family-names: Kane
+     affiliation: >-
+       The Rowland Institute at Harvard, Harvard
+       University, Cambridge, United States
+   - given-names: Gonçalo
+     family-names: Lopes
+     affiliation: 'NeuroGEARS Ltd, London, United Kingdom'
+   - given-names: Jonny
+     name-particle: L
+     family-names: Saunders
+     affiliation: >-
+       Institute of Neuroscience, Department of
+       Psychology, University of Oregon, Eugene,
+       United States
+   - given-names: Alexander
+     family-names: Mathis
+     affiliation: >-
+       The Rowland Institute at Harvard, Harvard
+       University, Cambridge, United States; Center
+       for Neuroprosthetics, Center for Intelligent
+       Systems, & Brain Mind Institute, School of Life
+       Sciences, Swiss Federal Institute of Technology
+       (EPFL), Lausanne, Switzerland
+   - given-names: Mackenzie
+     name-particle: W
+     family-names: Mathis
+     affiliation: >-
+       The Rowland Institute at Harvard, Harvard
+       University, Cambridge, United States; Center
+       for Neuroprosthetics, Center for Intelligent
+       Systems, & Brain Mind Institute, School of Life
+       Sciences, Swiss Federal Institute of Technology
+       (EPFL), Lausanne, Switzerland
+     email: mackenzie.mathis@epfl.ch
+ date-released: 2020-08-05
+ doi: "10.7554/eLife.61909"
+ license: "AGPL-3.0-or-later"
+ version: "1.0.3"
Repositories/DeepLabCut-live/LICENSE ADDED
@@ -0,0 +1,666 @@
1
+ Copyright 2020-2022 by Mackenzie Mathis, Gary Kane, Alexander Mathis and contributors. All rights reserved.
2
+ This software may not be used to harm any person deliberately.
3
+
4
+ This project and all its files are licensed under GNU AGPLv3 or later version.
5
+
6
+ GNU AFFERO GENERAL PUBLIC LICENSE
7
+ Version 3, 19 November 2007
8
+
9
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
10
+ Everyone is permitted to copy and distribute verbatim copies
11
+ of this license document, but changing it is not allowed.
12
+
13
+ Preamble
14
+
15
+ The GNU Affero General Public License is a free, copyleft license for
16
+ software and other kinds of works, specifically designed to ensure
17
+ cooperation with the community in the case of network server software.
18
+
19
+ The licenses for most software and other practical works are designed
20
+ to take away your freedom to share and change the works. By contrast,
21
+ our General Public Licenses are intended to guarantee your freedom to
22
+ share and change all versions of a program--to make sure it remains free
23
+ software for all its users.
24
+
25
+ When we speak of free software, we are referring to freedom, not
26
+ price. Our General Public Licenses are designed to make sure that you
27
+ have the freedom to distribute copies of free software (and charge for
28
+ them if you wish), that you receive source code or can get it if you
29
+ want it, that you can change the software or use pieces of it in new
30
+ free programs, and that you know you can do these things.
31
+
32
+ Developers that use our General Public Licenses protect your rights
33
+ with two steps: (1) assert copyright on the software, and (2) offer
34
+ you this License which gives you legal permission to copy, distribute
35
+ and/or modify the software.
36
+
37
+ A secondary benefit of defending all users' freedom is that
38
+ improvements made in alternate versions of the program, if they
39
+ receive widespread use, become available for other developers to
40
+ incorporate. Many developers of free software are heartened and
41
+ encouraged by the resulting cooperation. However, in the case of
42
+ software used on network servers, this result may fail to come about.
43
+ The GNU General Public License permits making a modified version and
44
+ letting the public access it on a server without ever releasing its
45
+ source code to the public.
46
+
47
+ The GNU Affero General Public License is designed specifically to
48
+ ensure that, in such cases, the modified source code becomes available
49
+ to the community. It requires the operator of a network server to
50
+ provide the source code of the modified version running there to the
51
+ users of that server. Therefore, public use of a modified version, on
52
+ a publicly accessible server, gives the public access to the source
53
+ code of the modified version.
54
+
55
+ An older license, called the Affero General Public License and
56
+ published by Affero, was designed to accomplish similar goals. This is
57
+ a different license, not a version of the Affero GPL, but Affero has
58
+ released a new version of the Affero GPL which permits relicensing under
59
+ this license.
60
+
61
+ The precise terms and conditions for copying, distribution and
62
+ modification follow.
63
+
64
+ TERMS AND CONDITIONS
65
+
66
+ 0. Definitions.
67
+
68
+ "This License" refers to version 3 of the GNU Affero General Public License.
69
+
70
+ "Copyright" also means copyright-like laws that apply to other kinds of
71
+ works, such as semiconductor masks.
72
+
73
+ "The Program" refers to any copyrightable work licensed under this
74
+ License. Each licensee is addressed as "you". "Licensees" and
75
+ "recipients" may be individuals or organizations.
76
+
77
+ To "modify" a work means to copy from or adapt all or part of the work
78
+ in a fashion requiring copyright permission, other than the making of an
79
+ exact copy. The resulting work is called a "modified version" of the
80
+ earlier work or a work "based on" the earlier work.
81
+
82
+ A "covered work" means either the unmodified Program or a work based
83
+ on the Program.
84
+
85
+ To "propagate" a work means to do anything with it that, without
86
+ permission, would make you directly or secondarily liable for
87
+ infringement under applicable copyright law, except executing it on a
88
+ computer or modifying a private copy. Propagation includes copying,
89
+ distribution (with or without modification), making available to the
90
+ public, and in some countries other activities as well.
91
+
92
+ To "convey" a work means any kind of propagation that enables other
93
+ parties to make or receive copies. Mere interaction with a user through
94
+ a computer network, with no transfer of a copy, is not conveying.
95
+
96
+ An interactive user interface displays "Appropriate Legal Notices"
97
+ to the extent that it includes a convenient and prominently visible
98
+ feature that (1) displays an appropriate copyright notice, and (2)
99
+ tells the user that there is no warranty for the work (except to the
100
+ extent that warranties are provided), that licensees may convey the
101
+ work under this License, and how to view a copy of this License. If
102
+ the interface presents a list of user commands or options, such as a
103
+ menu, a prominent item in the list meets this criterion.
104
+
105
+ 1. Source Code.
106
+
107
+ The "source code" for a work means the preferred form of the work
108
+ for making modifications to it. "Object code" means any non-source
109
+ form of a work.
110
+
111
+ A "Standard Interface" means an interface that either is an official
112
+ standard defined by a recognized standards body, or, in the case of
113
+ interfaces specified for a particular programming language, one that
114
+ is widely used among developers working in that language.
115
+
116
+ The "System Libraries" of an executable work include anything, other
117
+ than the work as a whole, that (a) is included in the normal form of
118
+ packaging a Major Component, but which is not part of that Major
119
+ Component, and (b) serves only to enable use of the work with that
120
+ Major Component, or to implement a Standard Interface for which an
121
+ implementation is available to the public in source code form. A
122
+ "Major Component", in this context, means a major essential component
123
+ (kernel, window system, and so on) of the specific operating system
124
+ (if any) on which the executable work runs, or a compiler used to
125
+ produce the work, or an object code interpreter used to run it.
126
+
127
+ The "Corresponding Source" for a work in object code form means all
128
+ the source code needed to generate, install, and (for an executable
129
+ work) run the object code and to modify the work, including scripts to
130
+ control those activities. However, it does not include the work's
131
+ System Libraries, or general-purpose tools or generally available free
132
+ programs which are used unmodified in performing those activities but
133
+ which are not part of the work. For example, Corresponding Source
134
+ includes interface definition files associated with source files for
135
+ the work, and the source code for shared libraries and dynamically
136
+ linked subprograms that the work is specifically designed to require,
137
+ such as by intimate data communication or control flow between those
138
+ subprograms and other parts of the work.
139
+
140
+ The Corresponding Source need not include anything that users
141
+ can regenerate automatically from other parts of the Corresponding
142
+ Source.
143
+
144
+ The Corresponding Source for a work in source code form is that
145
+ same work.
146
+
147
+ 2. Basic Permissions.
148
+
149
+ All rights granted under this License are granted for the term of
150
+ copyright on the Program, and are irrevocable provided the stated
151
+ conditions are met. This License explicitly affirms your unlimited
152
+ permission to run the unmodified Program. The output from running a
153
+ covered work is covered by this License only if the output, given its
154
+ content, constitutes a covered work. This License acknowledges your
155
+ rights of fair use or other equivalent, as provided by copyright law.
156
+
157
+ You may make, run and propagate covered works that you do not
158
+ convey, without conditions so long as your license otherwise remains
159
+ in force. You may convey covered works to others for the sole purpose
160
+ of having them make modifications exclusively for you, or provide you
161
+ with facilities for running those works, provided that you comply with
162
+ the terms of this License in conveying all material for which you do
163
+ not control copyright. Those thus making or running the covered works
164
+ for you must do so exclusively on your behalf, under your direction
165
+ and control, on terms that prohibit them from making any copies of
166
+ your copyrighted material outside their relationship with you.
167
+
168
+ Conveying under any other circumstances is permitted solely under
169
+ the conditions stated below. Sublicensing is not allowed; section 10
170
+ makes it unnecessary.
171
+
172
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
173
+
174
+ No covered work shall be deemed part of an effective technological
175
+ measure under any applicable law fulfilling obligations under article
176
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
177
+ similar laws prohibiting or restricting circumvention of such
178
+ measures.
179
+
180
+ When you convey a covered work, you waive any legal power to forbid
181
+ circumvention of technological measures to the extent such circumvention
182
+ is effected by exercising rights under this License with respect to
183
+ the covered work, and you disclaim any intention to limit operation or
184
+ modification of the work as a means of enforcing, against the work's
185
+ users, your or third parties' legal rights to forbid circumvention of
186
+ technological measures.
187
+
188
+ 4. Conveying Verbatim Copies.
189
+
190
+ You may convey verbatim copies of the Program's source code as you
191
+ receive it, in any medium, provided that you conspicuously and
192
+ appropriately publish on each copy an appropriate copyright notice;
193
+ keep intact all notices stating that this License and any
194
+ non-permissive terms added in accord with section 7 apply to the code;
195
+ keep intact all notices of the absence of any warranty; and give all
196
+ recipients a copy of this License along with the Program.
197
+
198
+ You may charge any price or no price for each copy that you convey,
199
+ and you may offer support or warranty protection for a fee.
200
+
201
+ 5. Conveying Modified Source Versions.
202
+
203
+ You may convey a work based on the Program, or the modifications to
204
+ produce it from the Program, in the form of source code under the
205
+ terms of section 4, provided that you also meet all of these conditions:
206
+
207
+ a) The work must carry prominent notices stating that you modified
208
+ it, and giving a relevant date.
209
+
210
+ b) The work must carry prominent notices stating that it is
211
+ released under this License and any conditions added under section
212
+ 7. This requirement modifies the requirement in section 4 to
213
+ "keep intact all notices".
214
+
215
+ c) You must license the entire work, as a whole, under this
216
+ License to anyone who comes into possession of a copy. This
217
+ License will therefore apply, along with any applicable section 7
218
+ additional terms, to the whole of the work, and all its parts,
219
+ regardless of how they are packaged. This License gives no
220
+ permission to license the work in any other way, but it does not
221
+ invalidate such permission if you have separately received it.
222
+
223
+ d) If the work has interactive user interfaces, each must display
224
+ Appropriate Legal Notices; however, if the Program has interactive
225
+ interfaces that do not display Appropriate Legal Notices, your
226
+ work need not make them do so.
227
+
228
+ A compilation of a covered work with other separate and independent
229
+ works, which are not by their nature extensions of the covered work,
230
+ and which are not combined with it such as to form a larger program,
231
+ in or on a volume of a storage or distribution medium, is called an
232
+ "aggregate" if the compilation and its resulting copyright are not
233
+ used to limit the access or legal rights of the compilation's users
234
+ beyond what the individual works permit. Inclusion of a covered work
235
+ in an aggregate does not cause this License to apply to the other
236
+ parts of the aggregate.
237
+
238
+ 6. Conveying Non-Source Forms.
239
+
240
+ You may convey a covered work in object code form under the terms
241
+ of sections 4 and 5, provided that you also convey the
242
+ machine-readable Corresponding Source under the terms of this License,
243
+ in one of these ways:
244
+
245
+ a) Convey the object code in, or embodied in, a physical product
246
+ (including a physical distribution medium), accompanied by the
247
+ Corresponding Source fixed on a durable physical medium
248
+ customarily used for software interchange.
249
+
250
+ b) Convey the object code in, or embodied in, a physical product
251
+ (including a physical distribution medium), accompanied by a
252
+ written offer, valid for at least three years and valid for as
253
+ long as you offer spare parts or customer support for that product
254
+ model, to give anyone who possesses the object code either (1) a
255
+ copy of the Corresponding Source for all the software in the
256
+ product that is covered by this License, on a durable physical
257
+ medium customarily used for software interchange, for a price no
258
+ more than your reasonable cost of physically performing this
259
+ conveying of source, or (2) access to copy the
260
+ Corresponding Source from a network server at no charge.
261
+
262
+ c) Convey individual copies of the object code with a copy of the
263
+ written offer to provide the Corresponding Source. This
264
+ alternative is allowed only occasionally and noncommercially, and
265
+ only if you received the object code with such an offer, in accord
266
+ with subsection 6b.
267
+
268
+ d) Convey the object code by offering access from a designated
269
+ place (gratis or for a charge), and offer equivalent access to the
270
+ Corresponding Source in the same way through the same place at no
271
+ further charge. You need not require recipients to copy the
272
+ Corresponding Source along with the object code. If the place to
273
+ copy the object code is a network server, the Corresponding Source
274
+ may be on a different server (operated by you or a third party)
275
+ that supports equivalent copying facilities, provided you maintain
276
+ clear directions next to the object code saying where to find the
277
+ Corresponding Source. Regardless of what server hosts the
278
+ Corresponding Source, you remain obligated to ensure that it is
279
+ available for as long as needed to satisfy these requirements.
280
+
281
+ e) Convey the object code using peer-to-peer transmission, provided
282
+ you inform other peers where the object code and Corresponding
283
+ Source of the work are being offered to the general public at no
284
+ charge under subsection 6d.
285
+
286
+ A separable portion of the object code, whose source code is excluded
287
+ from the Corresponding Source as a System Library, need not be
288
+ included in conveying the object code work.
289
+
290
+ A "User Product" is either (1) a "consumer product", which means any
291
+ tangible personal property which is normally used for personal, family,
292
+ or household purposes, or (2) anything designed or sold for incorporation
293
+ into a dwelling. In determining whether a product is a consumer product,
294
+ doubtful cases shall be resolved in favor of coverage. For a particular
295
+ product received by a particular user, "normally used" refers to a
296
+ typical or common use of that class of product, regardless of the status
297
+ of the particular user or of the way in which the particular user
298
+ actually uses, or expects or is expected to use, the product. A product
299
+ is a consumer product regardless of whether the product has substantial
300
+ commercial, industrial or non-consumer uses, unless such uses represent
301
+ the only significant mode of use of the product.
302
+
303
+ "Installation Information" for a User Product means any methods,
304
+ procedures, authorization keys, or other information required to install
305
+ and execute modified versions of a covered work in that User Product from
306
+ a modified version of its Corresponding Source. The information must
307
+ suffice to ensure that the continued functioning of the modified object
308
+ code is in no case prevented or interfered with solely because
309
+ modification has been made.
310
+
311
+ If you convey an object code work under this section in, or with, or
312
+ specifically for use in, a User Product, and the conveying occurs as
313
+ part of a transaction in which the right of possession and use of the
314
+ User Product is transferred to the recipient in perpetuity or for a
315
+ fixed term (regardless of how the transaction is characterized), the
316
+ Corresponding Source conveyed under this section must be accompanied
317
+ by the Installation Information. But this requirement does not apply
318
+ if neither you nor any third party retains the ability to install
319
+ modified object code on the User Product (for example, the work has
320
+ been installed in ROM).
321
+
322
+ The requirement to provide Installation Information does not include a
323
+ requirement to continue to provide support service, warranty, or updates
324
+ for a work that has been modified or installed by the recipient, or for
325
+ the User Product in which it has been modified or installed. Access to a
326
+ network may be denied when the modification itself materially and
327
+ adversely affects the operation of the network or violates the rules and
328
+ protocols for communication across the network.
329
+
330
+ Corresponding Source conveyed, and Installation Information provided,
331
+ in accord with this section must be in a format that is publicly
332
+ documented (and with an implementation available to the public in
333
+ source code form), and must require no special password or key for
334
+ unpacking, reading or copying.
335
+
336
+ 7. Additional Terms.
337
+
338
+ "Additional permissions" are terms that supplement the terms of this
339
+ License by making exceptions from one or more of its conditions.
340
+ Additional permissions that are applicable to the entire Program shall
341
+ be treated as though they were included in this License, to the extent
342
+ that they are valid under applicable law. If additional permissions
343
+ apply only to part of the Program, that part may be used separately
344
+ under those permissions, but the entire Program remains governed by
345
+ this License without regard to the additional permissions.
346
+
347
+ When you convey a copy of a covered work, you may at your option
348
+ remove any additional permissions from that copy, or from any part of
349
+ it. (Additional permissions may be written to require their own
350
+ removal in certain cases when you modify the work.) You may place
351
+ additional permissions on material, added by you to a covered work,
352
+ for which you have or can give appropriate copyright permission.
353
+
354
+ Notwithstanding any other provision of this License, for material you
355
+ add to a covered work, you may (if authorized by the copyright holders of
356
+ that material) supplement the terms of this License with terms:
357
+
358
+ a) Disclaiming warranty or limiting liability differently from the
359
+ terms of sections 15 and 16 of this License; or
360
+
361
+ b) Requiring preservation of specified reasonable legal notices or
362
+ author attributions in that material or in the Appropriate Legal
363
+ Notices displayed by works containing it; or
364
+
365
+ c) Prohibiting misrepresentation of the origin of that material, or
366
+ requiring that modified versions of such material be marked in
367
+ reasonable ways as different from the original version; or
368
+
369
+ d) Limiting the use for publicity purposes of names of licensors or
370
+ authors of the material; or
371
+
372
+ e) Declining to grant rights under trademark law for use of some
373
+ trade names, trademarks, or service marks; or
374
+
375
+ f) Requiring indemnification of licensors and authors of that
376
+ material by anyone who conveys the material (or modified versions of
377
+ it) with contractual assumptions of liability to the recipient, for
378
+ any liability that these contractual assumptions directly impose on
379
+ those licensors and authors.
380
+
381
+ All other non-permissive additional terms are considered "further
382
+ restrictions" within the meaning of section 10. If the Program as you
383
+ received it, or any part of it, contains a notice stating that it is
384
+ governed by this License along with a term that is a further
385
+ restriction, you may remove that term. If a license document contains
386
+ a further restriction but permits relicensing or conveying under this
387
+ License, you may add to a covered work material governed by the terms
388
+ of that license document, provided that the further restriction does
389
+ not survive such relicensing or conveying.
390
+
391
+ If you add terms to a covered work in accord with this section, you
392
+ must place, in the relevant source files, a statement of the
393
+ additional terms that apply to those files, or a notice indicating
394
+ where to find the applicable terms.
395
+
396
+ Additional terms, permissive or non-permissive, may be stated in the
397
+ form of a separately written license, or stated as exceptions;
398
+ the above requirements apply either way.
399
+
400
+ 8. Termination.
401
+
402
+ You may not propagate or modify a covered work except as expressly
403
+ provided under this License. Any attempt otherwise to propagate or
404
+ modify it is void, and will automatically terminate your rights under
405
+ this License (including any patent licenses granted under the third
406
+ paragraph of section 11).
407
+
408
+ However, if you cease all violation of this License, then your
409
+ license from a particular copyright holder is reinstated (a)
410
+ provisionally, unless and until the copyright holder explicitly and
411
+ finally terminates your license, and (b) permanently, if the copyright
412
+ holder fails to notify you of the violation by some reasonable means
413
+ prior to 60 days after the cessation.
414
+
415
+ Moreover, your license from a particular copyright holder is
416
+ reinstated permanently if the copyright holder notifies you of the
417
+ violation by some reasonable means, this is the first time you have
418
+ received notice of violation of this License (for any work) from that
419
+ copyright holder, and you cure the violation prior to 30 days after
420
+ your receipt of the notice.
421
+
422
+ Termination of your rights under this section does not terminate the
423
+ licenses of parties who have received copies or rights from you under
424
+ this License. If your rights have been terminated and not permanently
425
+ reinstated, you do not qualify to receive new licenses for the same
426
+ material under section 10.
427
+
428
+ 9. Acceptance Not Required for Having Copies.
429
+
430
+ You are not required to accept this License in order to receive or
431
+ run a copy of the Program. Ancillary propagation of a covered work
432
+ occurring solely as a consequence of using peer-to-peer transmission
433
+ to receive a copy likewise does not require acceptance. However,
434
+ nothing other than this License grants you permission to propagate or
435
+ modify any covered work. These actions infringe copyright if you do
436
+ not accept this License. Therefore, by modifying or propagating a
437
+ covered work, you indicate your acceptance of this License to do so.
438
+
439
+ 10. Automatic Licensing of Downstream Recipients.
440
+
441
+ Each time you convey a covered work, the recipient automatically
442
+ receives a license from the original licensors, to run, modify and
443
+ propagate that work, subject to this License. You are not responsible
444
+ for enforcing compliance by third parties with this License.
445
+
446
+ An "entity transaction" is a transaction transferring control of an
447
+ organization, or substantially all assets of one, or subdividing an
448
+ organization, or merging organizations. If propagation of a covered
449
+ work results from an entity transaction, each party to that
450
+ transaction who receives a copy of the work also receives whatever
451
+ licenses to the work the party's predecessor in interest had or could
452
+ give under the previous paragraph, plus a right to possession of the
453
+ Corresponding Source of the work from the predecessor in interest, if
454
+ the predecessor has it or can get it with reasonable efforts.
455
+
456
+ You may not impose any further restrictions on the exercise of the
457
+ rights granted or affirmed under this License. For example, you may
458
+ not impose a license fee, royalty, or other charge for exercise of
459
+ rights granted under this License, and you may not initiate litigation
460
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
461
+ any patent claim is infringed by making, using, selling, offering for
462
+ sale, or importing the Program or any portion of it.
463
+
464
+ 11. Patents.
465
+
466
+ A "contributor" is a copyright holder who authorizes use under this
467
+ License of the Program or a work on which the Program is based. The
468
+ work thus licensed is called the contributor's "contributor version".
469
+
470
+ A contributor's "essential patent claims" are all patent claims
471
+ owned or controlled by the contributor, whether already acquired or
472
+ hereafter acquired, that would be infringed by some manner, permitted
473
+ by this License, of making, using, or selling its contributor version,
474
+ but do not include claims that would be infringed only as a
475
+ consequence of further modification of the contributor version. For
476
+ purposes of this definition, "control" includes the right to grant
477
+ patent sublicenses in a manner consistent with the requirements of
478
+ this License.
479
+
480
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
481
+ patent license under the contributor's essential patent claims, to
482
+ make, use, sell, offer for sale, import and otherwise run, modify and
483
+ propagate the contents of its contributor version.
484
+
485
+ In the following three paragraphs, a "patent license" is any express
486
+ agreement or commitment, however denominated, not to enforce a patent
487
+ (such as an express permission to practice a patent or covenant not to
488
+ sue for patent infringement). To "grant" such a patent license to a
489
+ party means to make such an agreement or commitment not to enforce a
490
+ patent against the party.
491
+
492
+ If you convey a covered work, knowingly relying on a patent license,
493
+ and the Corresponding Source of the work is not available for anyone
494
+ to copy, free of charge and under the terms of this License, through a
495
+ publicly available network server or other readily accessible means,
496
+ then you must either (1) cause the Corresponding Source to be so
497
+ available, or (2) arrange to deprive yourself of the benefit of the
498
+ patent license for this particular work, or (3) arrange, in a manner
499
+ consistent with the requirements of this License, to extend the patent
500
+ license to downstream recipients. "Knowingly relying" means you have
501
+ actual knowledge that, but for the patent license, your conveying the
502
+ covered work in a country, or your recipient's use of the covered work
503
+ in a country, would infringe one or more identifiable patents in that
504
+ country that you have reason to believe are valid.
505
+
506
+ If, pursuant to or in connection with a single transaction or
507
+ arrangement, you convey, or propagate by procuring conveyance of, a
508
+ covered work, and grant a patent license to some of the parties
509
+ receiving the covered work authorizing them to use, propagate, modify
510
+ or convey a specific copy of the covered work, then the patent license
511
+ you grant is automatically extended to all recipients of the covered
512
+ work and works based on it.
513
+
514
+ A patent license is "discriminatory" if it does not include within
515
+ the scope of its coverage, prohibits the exercise of, or is
516
+ conditioned on the non-exercise of one or more of the rights that are
517
+ specifically granted under this License. You may not convey a covered
518
+ work if you are a party to an arrangement with a third party that is
519
+ in the business of distributing software, under which you make payment
520
+ to the third party based on the extent of your activity of conveying
521
+ the work, and under which the third party grants, to any of the
522
+ parties who would receive the covered work from you, a discriminatory
523
+ patent license (a) in connection with copies of the covered work
524
+ conveyed by you (or copies made from those copies), or (b) primarily
525
+ for and in connection with specific products or compilations that
526
+ contain the covered work, unless you entered into that arrangement,
527
+ or that patent license was granted, prior to 28 March 2007.
528
+
529
+ Nothing in this License shall be construed as excluding or limiting
530
+ any implied license or other defenses to infringement that may
531
+ otherwise be available to you under applicable patent law.
532
+
533
+ 12. No Surrender of Others' Freedom.
534
+
535
+ If conditions are imposed on you (whether by court order, agreement or
536
+ otherwise) that contradict the conditions of this License, they do not
537
+ excuse you from the conditions of this License. If you cannot convey a
538
+ covered work so as to satisfy simultaneously your obligations under this
539
+ License and any other pertinent obligations, then as a consequence you may
540
+ not convey it at all. For example, if you agree to terms that obligate you
541
+ to collect a royalty for further conveying from those to whom you convey
542
+ the Program, the only way you could satisfy both those terms and this
543
+ License would be to refrain entirely from conveying the Program.
544
+
545
+ 13. Remote Network Interaction; Use with the GNU General Public License.
546
+
547
+ Notwithstanding any other provision of this License, if you modify the
548
+ Program, your modified version must prominently offer all users
549
+ interacting with it remotely through a computer network (if your version
550
+ supports such interaction) an opportunity to receive the Corresponding
551
+ Source of your version by providing access to the Corresponding Source
552
+ from a network server at no charge, through some standard or customary
553
+ means of facilitating copying of software. This Corresponding Source
554
+ shall include the Corresponding Source for any work covered by version 3
555
+ of the GNU General Public License that is incorporated pursuant to the
556
+ following paragraph.
557
+
558
+ Notwithstanding any other provision of this License, you have
559
+ permission to link or combine any covered work with a work licensed
560
+ under version 3 of the GNU General Public License into a single
561
+ combined work, and to convey the resulting work. The terms of this
562
+ License will continue to apply to the part which is the covered work,
563
+ but the work with which it is combined will remain governed by version
564
+ 3 of the GNU General Public License.
565
+
566
+ 14. Revised Versions of this License.
567
+
568
+ The Free Software Foundation may publish revised and/or new versions of
569
+ the GNU Affero General Public License from time to time. Such new versions
570
+ will be similar in spirit to the present version, but may differ in detail to
571
+ address new problems or concerns.
572
+
573
+ Each version is given a distinguishing version number. If the
574
+ Program specifies that a certain numbered version of the GNU Affero General
575
+ Public License "or any later version" applies to it, you have the
576
+ option of following the terms and conditions either of that numbered
577
+ version or of any later version published by the Free Software
578
+ Foundation. If the Program does not specify a version number of the
579
+ GNU Affero General Public License, you may choose any version ever published
580
+ by the Free Software Foundation.
581
+
582
+ If the Program specifies that a proxy can decide which future
583
+ versions of the GNU Affero General Public License can be used, that proxy's
584
+ public statement of acceptance of a version permanently authorizes you
585
+ to choose that version for the Program.
586
+
587
+ Later license versions may give you additional or different
588
+ permissions. However, no additional obligations are imposed on any
589
+ author or copyright holder as a result of your choosing to follow a
590
+ later version.
591
+
592
+ 15. Disclaimer of Warranty.
593
+
594
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
595
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
596
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
597
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
598
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
599
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
600
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
601
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
602
+
603
+ 16. Limitation of Liability.
604
+
605
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
606
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
607
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
608
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
609
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
610
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
611
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
612
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
613
+ SUCH DAMAGES.
614
+
615
+ 17. Interpretation of Sections 15 and 16.
616
+
617
+ If the disclaimer of warranty and limitation of liability provided
618
+ above cannot be given local legal effect according to their terms,
619
+ reviewing courts shall apply local law that most closely approximates
620
+ an absolute waiver of all civil liability in connection with the
621
+ Program, unless a warranty or assumption of liability accompanies a
622
+ copy of the Program in return for a fee.
623
+
624
+ END OF TERMS AND CONDITIONS
625
+
626
+ How to Apply These Terms to Your New Programs
627
+
628
+ If you develop a new program, and you want it to be of the greatest
629
+ possible use to the public, the best way to achieve this is to make it
630
+ free software which everyone can redistribute and change under these terms.
631
+
632
+ To do so, attach the following notices to the program. It is safest
633
+ to attach them to the start of each source file to most effectively
634
+ state the exclusion of warranty; and each file should have at least
635
+ the "copyright" line and a pointer to where the full notice is found.
636
+
637
+ <one line to give the program's name and a brief idea of what it does.>
638
+ Copyright (C) <year> <name of author>
639
+
640
+ This program is free software: you can redistribute it and/or modify
641
+ it under the terms of the GNU Affero General Public License as published
642
+ by the Free Software Foundation, either version 3 of the License, or
643
+ (at your option) any later version.
644
+
645
+ This program is distributed in the hope that it will be useful,
646
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
647
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
648
+ GNU Affero General Public License for more details.
649
+
650
+ You should have received a copy of the GNU Affero General Public License
651
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
652
+
653
+ Also add information on how to contact you by electronic and paper mail.
654
+
655
+ If your software can interact with users remotely through a computer
656
+ network, you should also make sure that it provides a way for users to
657
+ get its source. For example, if your program is a web application, its
658
+ interface could display a "Source" link that leads users to an archive
659
+ of the code. There are many ways you could offer source, and different
660
+ solutions will be better for different programs; see section 13 for the
661
+ specific requirements.
662
+
663
+ You should also get your employer (if you work as a programmer) or school,
664
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
665
+ For more information on this, and how to apply and follow the GNU AGPL, see
666
+ <https://www.gnu.org/licenses/>.
Repositories/DeepLabCut-live/MANIFEST.in ADDED
@@ -0,0 +1 @@
 
 
1
+ include dlclive/check_install/*
Repositories/DeepLabCut-live/README.md ADDED
@@ -0,0 +1,153 @@
1
+ # DeepLabCut-live! SDK<img src="https://images.squarespace-cdn.com/content/v1/57f6d51c9f74566f55ecf271/1606082050387-M8M1CFI5DFUZCBAAUI0W/ke17ZwdGBToddI8pDm48kLuMKy7Ws6mFofiFehYynfdZw-zPPgdn4jUwVcJE1ZvWQUxwkmyExglNqGp0IvTJZUJFbgE-7XRK3dMEBRBhUpzp2tFVMcEgqZM8QO7VXXQogrsLnYKC4n4YnYuHC1HMRWygQlqMNAoTF9HaycikLeg/DLClive.png?format=750w" width="350" title="DLC-live" alt="DLC LIVE!" align="right" vspace = "50">
2
+
3
+ <a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
4
+ ![PyPI - Python Version](https://img.shields.io/pypi/v/deeplabcut-live)
5
+ [![Downloads](https://pepy.tech/badge/deeplabcut-live)](https://pepy.tech/project/deeplabcut-live)
6
+ [![Downloads](https://pepy.tech/badge/deeplabcut-live/month)](https://pepy.tech/project/deeplabcut-live)
7
+ ![Python package](https://github.com/DeepLabCut/DeepLabCut-live/workflows/Python%20package/badge.svg)
8
+ [![GitHub stars](https://img.shields.io/github/stars/DeepLabCut/DeepLabCut-live.svg?style=social&label=Star)](https://github.com/DeepLabCut/DeepLabCut-live)
9
+ [![GitHub forks](https://img.shields.io/github/forks/DeepLabCut/DeepLabCut-live.svg?style=social&label=Fork)](https://github.com/DeepLabCut/DeepLabCut-live)
10
+ [![Image.sc forum](https://img.shields.io/badge/dynamic/json.svg?label=forum&amp;url=https%3A%2F%2Fforum.image.sc%2Ftags%2Fdeeplabcut.json&amp;query=%24.topic_list.tags.0.topic_count&amp;colorB=brightgreen&amp;&amp;suffix=%20topics&amp;logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAABPklEQVR42m3SyyqFURTA8Y2BER0TDyExZ+aSPIKUlPIITFzKeQWXwhBlQrmFgUzMMFLKZeguBu5y+//17dP3nc5vuPdee6299gohUYYaDGOyyACq4JmQVoFujOMR77hNfOAGM+hBOQqB9TjHD36xhAa04RCuuXeKOvwHVWIKL9jCK2bRiV284QgL8MwEjAneeo9VNOEaBhzALGtoRy02cIcWhE34jj5YxgW+E5Z4iTPkMYpPLCNY3hdOYEfNbKYdmNngZ1jyEzw7h7AIb3fRTQ95OAZ6yQpGYHMMtOTgouktYwxuXsHgWLLl+4x++Kx1FJrjLTagA77bTPvYgw1rRqY56e+w7GNYsqX6JfPwi7aR+Y5SA+BXtKIRfkfJAYgj14tpOF6+I46c4/cAM3UhM3JxyKsxiOIhH0IO6SH/A1Kb1WBeUjbkAAAAAElFTkSuQmCC)](https://forum.image.sc/tags/deeplabcut)
11
+ [![Gitter](https://badges.gitter.im/DeepLabCut/community.svg)](https://gitter.im/DeepLabCut/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
12
+ [![Twitter Follow](https://img.shields.io/twitter/follow/DeepLabCut.svg?label=DeepLabCut&style=social)](https://twitter.com/DeepLabCut)
13
+
14
+ This package contains a [DeepLabCut](http://www.mousemotorlab.org/deeplabcut) inference pipeline for real-time applications that has minimal (software) dependencies. Thus, it is as easy to install as possible (in particular, on atypical systems like [NVIDIA Jetson boards](https://developer.nvidia.com/buy-jetson)).
15
+
16
+ **Performance:** If you would like to see estimates on how your model should perform given different video sizes, neural network type, and hardware, please see: https://deeplabcut.github.io/DLC-inferencespeed-benchmark/
17
+
18
+ If you have different hardware, please consider submitting your results too! https://github.com/DeepLabCut/DLC-inferencespeed-benchmark
19
+
20
+ **What this SDK provides:** This package provides a `DLCLive` class which enables pose estimation online to provide feedback. This object loads and prepares a DeepLabCut network for inference, and will return the predicted pose for single images.
21
+
22
+ To perform processing on poses (such as predicting an animal's future pose from its current pose, or triggering external hardware, e.g. sending TTL pulses to a laser for optogenetic stimulation), this object takes in a `Processor` object. Processor objects must contain two methods: `process` and `save` (see the sketch below).
23
+
24
+ - The `process` method takes in a pose, performs some processing, and returns the processed pose.
25
+ - The `save` method saves any valuable data created by or used by the processor
26
+
27
+ For more details and examples, see documentation [here](dlclive/processor/README.md).
28
+
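+ As a rough sketch, a minimal custom processor could look like the following (the exact base-class signature may differ slightly; see the processor documentation linked above):
+
+ ```python
+ from dlclive import Processor
+
+ class MyProcessor(Processor):
+     def process(self, pose, **kwargs):
+         # e.g. smooth the pose, predict a future pose, or trigger external hardware
+         return pose
+
+     def save(self, filename=""):
+         # save any data accumulated by the processor (nothing to save in this sketch)
+         return 0
+ ```
+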
29
+ ###### 🔥🔥🔥🔥🔥 Note: on its own, this object does not record video or capture images from a camera. This must be done separately, e.g. with our [DeepLabCut-live GUI](https://github.com/gkane26/DeepLabCut-live-GUI). 🔥🔥🔥
30
+
31
+ ### News!
32
+ - March 2022: DeepLabCut-Live! 1.0.2 supports poetry installation `poetry install deeplabcut-live`, thanks to PR #60.
33
+ - March 2021: DeepLabCut-Live! [**version 1.0** is released](https://pypi.org/project/deeplabcut-live/), with support for tensorflow 1 and tensorflow 2!
34
+ - Feb 2021: DeepLabCut-Live! was featured in **Nature Methods**: ["Real-time behavioral analysis"](https://www.nature.com/articles/s41592-021-01072-z)
35
+ - Jan 2021: full **eLife** paper is published: ["Real-time, low-latency closed-loop feedback using markerless posture tracking"](https://elifesciences.org/articles/61909)
36
+ - Dec 2020: we talked to **RTS Suisse Radio** about DLC-Live!: ["Capture animal movements in real time"](https://www.rts.ch/play/radio/cqfd/audio/capturer-les-mouvements-des-animaux-en-temps-reel?id=11782529)
37
+
38
+
39
+ ### Installation:
40
+
41
+ Please see our instruction manual to install on a [Windows or Linux machine](docs/install_desktop.md) or on a [NVIDIA Jetson Development Board](docs/install_jetson.md). Note, this code works with tensorflow (TF) 1 or TF 2 models, but you must import a model with the same TF version that was used to export it (e.g., export with TF1.13, then use TF1.13 with DLC-Live; export with TF2.3, then use TF2.3 with DLC-Live).
42
+
43
+ - available on pypi as: `pip install deeplabcut-live`
44
+
45
+ Note, you can then test your installation by running:
46
+
47
+ `dlc-live-test`
48
+
49
+ If installed properly, this script will i) create a temporary folder, ii) download the full_dog model from the [DeepLabCut Model Zoo](http://www.mousemotorlab.org/dlc-modelzoo), iii) download a short video clip of a dog, iv) run inference while displaying keypoints, and v) remove the temporary folder.
50
+
51
+ <img src="https://images.squarespace-cdn.com/content/v1/57f6d51c9f74566f55ecf271/1606081086014-TG9GWH63ZGGOO7K779G3/ke17ZwdGBToddI8pDm48kHiSoSToKfKUI9t99vKErWoUqsxRUqqbr1mOJYKfIPR7LoDQ9mXPOjoJoqy81S2I8N_N4V1vUb5AoIIIbLZhVYxCRW4BPu10St3TBAUQYVKcOoIGycwr1shdgJWzLuxyzjLbSRGBFFxjYMBr42yCvRK5HHsLZWtMlAHzDU294nCd/dlclivetest.png?format=1000w" width="650" title="DLC-live-test" alt="DLC LIVE TEST" align="center" vspace = "50">
52
+
53
+ ### Quick Start: instructions for use:
54
+
55
+ 1. Initialize `Processor` (if desired)
56
+ 2. Initialize the `DLCLive` object
57
+ 3. Perform pose estimation!
58
+
59
+ ```python
60
+ from dlclive import DLCLive, Processor
61
+ dlc_proc = Processor()
62
+ dlc_live = DLCLive(<path to exported model directory>, processor=dlc_proc)
63
+ dlc_live.init_inference(<your image>)
64
+ dlc_live.get_pose(<your image>)
65
+ ```
66
+
67
+ `DLCLive` **parameters:**
68
+
69
+ - `path` = string; full path to the exported DLC model directory
70
+ - `model_type` = string; the type of model to use for inference. Types include:
71
+ - `base` = the base DeepLabCut model
72
+ - `tensorrt` = apply [tensor-rt](https://developer.nvidia.com/tensorrt) optimizations to model
73
+ - `tflite` = use [tensorflow lite](https://www.tensorflow.org/lite) inference (in progress...)
74
+ - `cropping` = list of int, optional; cropping parameters in pixel number: [x1, x2, y1, y2]
75
+ - `dynamic` = tuple, optional; defines parameters for dynamic cropping of images
76
+ - `index 0` = use dynamic cropping, bool
77
+ - `index 1` = detection threshold, float
78
+ - `index 2` = margin (in pixels) around identified points, int
79
+ - `resize` = float, optional; factor by which to resize image (resize=0.5 downsizes both width and height of image by half). Can be used to downsize large images for faster inference
80
+ - `processor` = dlc pose processor object, optional
81
+ - `display` = bool, optional; display processed image with DeepLabCut points? Can be used to troubleshoot cropping and resizing parameters, but is very slow
82
+
83
+ `DLCLive` **inputs:**
84
+
85
+ - `<path to exported model directory>` = path to the folder that has the `.pb` files that you acquire after running `deeplabcut.export_model`
86
+ - `<your image>` = is a numpy array of each frame
87
+
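+ Putting several of these options together, a hedged sketch (the model path and `frame` are placeholders):
+
+ ```python
+ from dlclive import DLCLive
+
+ dlc_live = DLCLive(
+     "<path to exported model directory>",
+     model_type="base",
+     cropping=[0, 400, 0, 300],  # [x1, x2, y1, y2] in pixels
+     dynamic=(True, 0.5, 10),    # (use dynamic cropping, detection threshold, margin in pixels)
+     resize=0.5,                 # halve the image width and height before inference
+ )
+ dlc_live.init_inference(frame)  # `frame` is a numpy array image
+ pose = dlc_live.get_pose(frame)
+ ```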
88
+
89
+ ### Benchmarking/Analyzing your exported DeepLabCut models
90
+
91
+ DeepLabCut-live offers some analysis tools that allow users to perform the following operations on videos, from python or from the command line:
92
+
93
+ 1. Test inference speed across a range of image sizes, downsizing images by specifying the `resize` or `pixels` parameter. Using the `pixels` parameter will resize images to the desired number of `pixels`, without changing the aspect ratio. Results will be saved (along with system info) to a pickle file if you specify an output directory.
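+
+ For reference, the `pixels` value is converted internally to a resize factor via `resize = sqrt(pixels / (width * height))`; for example, a 640x480 video (307,200 pixels) benchmarked with `pixels=76800` is downsized by a factor of 0.5, i.e. to 320x240.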
94
+ ##### python
95
+ ```python
96
+ dlclive.benchmark_videos('/path/to/exported/model', ['/path/to/video1', '/path/to/video2'], output='/path/to/output', resize=[1.0, 0.75, 0.5])
97
+ ```
98
+ ##### command line
99
+ ```
100
+ dlc-live-benchmark /path/to/exported/model /path/to/video1 /path/to/video2 -o /path/to/output -r 1.0 0.75 0.5
101
+ ```
102
+
103
+ 2. Display keypoints to visually inspect the accuracy of exported models on different image sizes (note, this is slow and only for testing purposes):
104
+
105
+ ##### python
106
+ ```python
107
+ dlclive.benchmark_videos('/path/to/exported/model', '/path/to/video', resize=0.5, display=True, pcutoff=0.5, display_radius=4, cmap='bmy')
108
+ ```
109
+ ##### command line
110
+ ```
111
+ dlc-live-benchmark /path/to/exported/model /path/to/video -r 0.5 --display --pcutoff 0.5 --display-radius 4 --cmap bmy
112
+ ```
113
+
114
+ 3. Analyze and create a labeled video using the exported model and desired resize parameters. This option functions similarly to `deeplabcut.benchmark_videos` and `deeplabcut.create_labeled_video` (note, this is slow and only for testing purposes).
115
+
116
+ ##### python
117
+ ```python
118
+ dlclive.benchmark_videos('/path/to/exported/model', '/path/to/video', resize=[1.0, 0.75, 0.5], pcutoff=0.5, display_radius=4, cmap='bmy', save_poses=True, save_video=True)
119
+ ```
120
+ ##### command line
121
+ ```
122
+ dlc-live-benchmark /path/to/exported/model /path/to/video -r 0.5 --pcutoff 0.5 --display-radius 4 --cmap bmy --save-poses --save-video
123
+ ```
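+
+ When `--save-poses` is used and pandas is installed, poses are written to an HDF5 file named `<video>_DLCLIVE_POSES.h5` in the output directory (or next to the video); without pandas, a numpy `.npy` array is saved instead. As a quick sketch, the saved poses can be read back with pandas:
+
+ ```python
+ import pandas as pd
+
+ # the file name below is illustrative; the actual name is derived from your video file
+ poses = pd.read_hdf("my_video_DLCLIVE_POSES.h5", key="df_with_missing")
+ print(poses.head())
+ ```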
124
+
125
+ ## License:
126
+
127
+ This project is licensed under the GNU AGPLv3. Note that the software is provided "as is", without warranty of any kind, express or implied. If you use the code or data, we ask that you please cite us! This software is available for licensing via the EPFL Technology Transfer Office (https://tto.epfl.ch/, info.tto@epfl.ch).
128
+
129
+ ## Community Support, Developers, & Help:
130
+
131
+ This is an actively developed package and we welcome community development and involvement.
132
+
133
+ - If you want to contribute to the code, please read our guide [here](https://github.com/DeepLabCut/DeepLabCut/blob/master/CONTRIBUTING.md), which is provided at the main repository of DeepLabCut.
134
+
135
+ - We are a community partner on the [![Image.sc forum](https://img.shields.io/badge/dynamic/json.svg?label=forum&amp;url=https%3A%2F%2Fforum.image.sc%2Ftags%2Fdeeplabcut.json&amp;query=%24.topic_list.tags.0.topic_count&amp;colorB=brightgreen&amp;&amp;suffix=%20topics&amp;logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAABPklEQVR42m3SyyqFURTA8Y2BER0TDyExZ+aSPIKUlPIITFzKeQWXwhBlQrmFgUzMMFLKZeguBu5y+//17dP3nc5vuPdee6299gohUYYaDGOyyACq4JmQVoFujOMR77hNfOAGM+hBOQqB9TjHD36xhAa04RCuuXeKOvwHVWIKL9jCK2bRiV284QgL8MwEjAneeo9VNOEaBhzALGtoRy02cIcWhE34jj5YxgW+E5Z4iTPkMYpPLCNY3hdOYEfNbKYdmNngZ1jyEzw7h7AIb3fRTQ95OAZ6yQpGYHMMtOTgouktYwxuXsHgWLLl+4x++Kx1FJrjLTagA77bTPvYgw1rRqY56e+w7GNYsqX6JfPwi7aR+Y5SA+BXtKIRfkfJAYgj14tpOF6+I46c4/cAM3UhM3JxyKsxiOIhH0IO6SH/A1Kb1WBeUjbkAAAAAElFTkSuQmCC)](https://forum.image.sc/tags/deeplabcut). Please post help and support questions on the forum with the tag DeepLabCut. Check out their mission statement [Scientific Community Image Forum: A discussion forum for scientific image software](https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.3000340).
136
+
137
+ - If you encounter a previously unreported bug/code issue, please post here (we encourage you to search issues first): https://github.com/DeepLabCut/DeepLabCut-live/issues
138
+
139
+ - For quick discussions here: [![Gitter](https://badges.gitter.im/DeepLabCut/community.svg)](https://gitter.im/DeepLabCut/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
140
+
141
+ ### Reference:
142
+
143
+ If you utilize our tool, please [cite Kane et al, eLife 2020](https://elifesciences.org/articles/61909). The preprint is available here: https://www.biorxiv.org/content/10.1101/2020.08.04.236422v2
144
+
145
+ ```
146
+ @Article{Kane2020dlclive,
147
+ author = {Kane, Gary and Lopes, Gonçalo and Sanders, Jonny and Mathis, Alexander and Mathis, Mackenzie},
148
+ title = {Real-time, low-latency closed-loop feedback using markerless posture tracking},
149
+ journal = {eLife},
150
+ year = {2020},
151
+ }
152
+ ```
153
+
Repositories/DeepLabCut-live/benchmarking/run_dlclive_benchmark.py ADDED
@@ -0,0 +1,43 @@
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+ # Script for running the official benchmark from Kane et al, 2020.
9
+ # Please share your results at https://github.com/DeepLabCut/DLC-inferencespeed-benchmark
10
+
11
+ import os, pathlib
12
+ import glob
13
+
14
+ from dlclive import benchmark_videos, download_benchmarking_data
15
+
16
+ datafolder = os.path.join(
17
+ pathlib.Path(__file__).parent.absolute(), "Data-DLC-live-benchmark"
18
+ )
19
+
20
+ if not os.path.isdir(datafolder): # only download if data doesn't exist!
21
+ # Downloading data.... this takes a while (see terminal)
22
+ download_benchmarking_data(datafolder)
23
+
24
+ n_frames = 10000 # 10000 frames is appropriate for testing on a GPU; reduce (e.g. to 1000) for CPU-only testing
25
+ pixels = [2500, 10000, 40000, 160000, 320000, 640000]
26
+
27
+ dog_models = glob.glob(datafolder + "/dog/*[!avi]")
28
+ dog_video = glob.glob(datafolder + "/dog/*.avi")[0]
29
+ mouse_models = glob.glob(datafolder + "/mouse_lick/*[!avi]")
30
+ mouse_video = glob.glob(datafolder + "/mouse_lick/*.avi")[0]
31
+
32
+ this_dir = os.path.dirname(os.path.realpath(__file__))
33
+ # storing results in /benchmarking/results: (for your PR)
34
+ out_dir = os.path.normpath(this_dir + "/results")
35
+
36
+ if not os.path.isdir(out_dir):
37
+ os.mkdir(out_dir)
38
+
39
+ for m in dog_models:
40
+ benchmark_videos(m, dog_video, output=out_dir, n_frames=n_frames, pixels=pixels)
41
+
42
+ for m in mouse_models:
43
+ benchmark_videos(m, mouse_video, output=out_dir, n_frames=n_frames, pixels=pixels)
Repositories/DeepLabCut-live/dlclive/__init__.py ADDED
@@ -0,0 +1,11 @@
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+ from dlclive.version import __version__, VERSION
9
+ from dlclive.dlclive import DLCLive
10
+ from dlclive.processor import Processor
11
+ from dlclive.benchmark import benchmark, benchmark_videos, download_benchmarking_data
Repositories/DeepLabCut-live/dlclive/benchmark.py ADDED
@@ -0,0 +1,726 @@
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+
9
+ import platform
10
+ import os
11
+ import time
12
+ import sys
13
+ import warnings
14
+ import subprocess
15
+ import typing
16
+ import pickle
17
+ import colorcet as cc
18
+ from PIL import ImageColor
19
+ import ruamel
20
+
21
+ try:
22
+ from pip._internal.operations import freeze
23
+ except ImportError:
24
+ from pip.operations import freeze
25
+
26
+ from tqdm import tqdm
27
+ import numpy as np
28
+ import tensorflow as tf
29
+ import cv2
30
+
31
+ from dlclive import DLCLive
32
+ from dlclive import VERSION
33
+ from dlclive import __file__ as dlcfile
34
+
35
+ from dlclive.utils import decode_fourcc
36
+
37
+
38
+ def download_benchmarking_data(
39
+ target_dir=".",
40
+ url="http://deeplabcut.rowland.harvard.edu/datasets/dlclivebenchmark.tar.gz",
41
+ ):
42
+ """
43
+ Downloads a DeepLabCut-Live benchmarking Data (videos & DLC models).
44
+ """
45
+ import urllib.request
46
+ import tarfile
47
+ from tqdm import tqdm
48
+
49
+ def show_progress(count, block_size, total_size):
50
+ pbar.update(block_size)
51
+
52
+ def tarfilenamecutting(tarf):
53
+ """' auxfun to extract folder path
54
+ ie. /xyz-trainsetxyshufflez/
55
+ """
56
+ for memberid, member in enumerate(tarf.getmembers()):
57
+ if memberid == 0:
58
+ parent = str(member.path)
59
+ l = len(parent) + 1
60
+ if member.path.startswith(parent):
61
+ member.path = member.path[l:]
62
+ yield member
63
+
64
+ response = urllib.request.urlopen(url)
65
+ print(
66
+ "Downloading the benchmarking data from the DeepLabCut server @Harvard -> Go Crimson!!! {}....".format(
67
+ url
68
+ )
69
+ )
70
+ total_size = int(response.getheader("Content-Length"))
71
+ pbar = tqdm(unit="B", total=total_size, position=0)
72
+ filename, _ = urllib.request.urlretrieve(url, reporthook=show_progress)
73
+ with tarfile.open(filename, mode="r:gz") as tar:
74
+ tar.extractall(target_dir, members=tarfilenamecutting(tar))
75
+
76
+
77
+ def get_system_info() -> dict:
78
+ """ Return summary info for system running benchmark
79
+ Returns
80
+ -------
81
+ dict
82
+ Dictionary containing the following system information:
83
+ * ``host_name`` (str): name of machine
84
+ * ``op_sys`` (str): operating system
85
+ * ``python`` (str): path to python (which conda/virtual environment)
86
+ * ``device`` (tuple): (device type (``'GPU'`` or ``'CPU'```), device information)
87
+ * ``freeze`` (list): list of installed packages and versions
88
+ * ``python_version`` (str): python version
89
+ * ``git_hash`` (str, None): If installed from git repository, hash of HEAD commit
90
+ * ``dlclive_version`` (str): dlclive version from :data:`dlclive.VERSION`
91
+ """
92
+
93
+ # get os
94
+
95
+ op_sys = platform.platform()
96
+ host_name = platform.node().replace(" ", "")
97
+
98
+ # A string giving the absolute path of the executable binary for the Python interpreter, on systems where this makes sense.
99
+ if platform.system() == "Windows":
100
+ host_python = sys.executable.split(os.path.sep)[-2]
101
+ else:
102
+ host_python = sys.executable.split(os.path.sep)[-3]
103
+
104
+ # try to get git hash if possible
105
+ dlc_basedir = os.path.dirname(os.path.dirname(dlcfile))
106
+ git_hash = None
107
+ try:
108
+ git_hash = subprocess.check_output(
109
+ ["git", "rev-parse", "HEAD"], cwd=dlc_basedir
110
+ )
111
+ git_hash = git_hash.decode("utf-8").rstrip("\n")
112
+ except subprocess.CalledProcessError:
113
+ # not installed from git repo, eg. pypi
114
+ # fine, pass quietly
115
+ pass
116
+
117
+ # get device info (GPU or CPU)
118
+ dev = None
119
+ if tf.test.is_gpu_available():
120
+ gpu_name = tf.test.gpu_device_name()
121
+ from tensorflow.python.client import device_lib
122
+
123
+ dev_desc = [
124
+ d.physical_device_desc
125
+ for d in device_lib.list_local_devices()
126
+ if d.name == gpu_name
127
+ ]
128
+ dev = [d.split(",")[1].split(":")[1].strip() for d in dev_desc]
129
+ dev_type = "GPU"
130
+ else:
131
+ from cpuinfo import get_cpu_info
132
+
133
+ dev = [get_cpu_info()["brand"]]
134
+ dev_type = "CPU"
135
+
136
+ return {
137
+ "host_name": host_name,
138
+ "op_sys": op_sys,
139
+ "python": host_python,
140
+ "device_type": dev_type,
141
+ "device": dev,
142
+ # pip freeze to get versions of all packages
143
+ "freeze": list(freeze.freeze()),
144
+ "python_version": sys.version,
145
+ "git_hash": git_hash,
146
+ "dlclive_version": VERSION,
147
+ }
148
+
149
+
150
+ def benchmark(
151
+ model_path,
152
+ video_path,
153
+ tf_config=None,
154
+ resize=None,
155
+ pixels=None,
156
+ cropping=None,
157
+ dynamic=(False, 0.5, 10),
158
+ n_frames=1000,
159
+ print_rate=False,
160
+ display=False,
161
+ pcutoff=0.0,
162
+ display_radius=3,
163
+ cmap="bmy",
164
+ save_poses=False,
165
+ save_video=False,
166
+ output=None,
167
+ ) -> typing.Tuple[np.ndarray, tuple, bool, dict]:
168
+ """ Analyze DeepLabCut-live exported model on a video:
169
+ Calculate inference time,
170
+ display keypoints, or
171
+ get poses/create a labeled video
172
+
173
+ Parameters
174
+ ----------
175
+ model_path : str
176
+ path to exported DeepLabCut model
177
+ video_path : str
178
+ path to video file
179
+ tf_config : :class:`tensorflow.ConfigProto`
180
+ tensorflow session configuration
181
+ resize : int, optional
182
+ resize factor. Can only use one of resize or pixels. If both are provided, will use pixels. by default None
183
+ pixels : int, optional
184
+ downsize image to this number of pixels, maintaining aspect ratio. Can only use one of resize or pixels. If both are provided, will use pixels. by default None
185
+ cropping : list of int
186
+ cropping parameters in pixel number: [x1, x2, y1, y2]
187
+ dynamic: triple containing (state, detection threshold, margin)
+ If the state is true, dynamic cropping is performed: once an object is detected (i.e. any body part likelihood > detection threshold),
+ object boundaries are computed from the smallest/largest x and y positions of all body parts. This window is expanded by the margin,
+ and from then on only the posture within this crop is analyzed (until the object is lost, i.e. all likelihoods < detection threshold).
+ The current position is used to update the crop window for the next frame, which is why the margin should be set large enough
+ given the movement of the animal.
193
+ n_frames : int, optional
194
+ number of frames to run inference on, by default 1000
195
+ print_rate : bool, optional
196
+ flat to print inference rate frame by frame, by default False
197
+ display : bool, optional
198
+ flag to display keypoints on images. Useful for checking the accuracy of exported models.
199
+ pcutoff : float, optional
200
+ likelihood threshold to display keypoints
201
+ display_radius : int, optional
202
+ size (radius in pixels) of keypoint to display
203
+ cmap : str, optional
204
+ a string indicating the :package:`colorcet` colormap, `options here <https://colorcet.holoviz.org/>`, by default "bmy"
205
+ save_poses : bool, optional
206
+ flag to save poses to an hdf5 file. If True, operates similar to :function:`DeepLabCut.benchmark_videos`, by default False
207
+ save_video : bool, optional
208
+ flag to save a labeled video. If True, operates similar to :function:`DeepLabCut.create_labeled_video`, by default False
209
+ output : str, optional
210
+ path to directory to save pose and/or video file. If not specified, will use the directory of video_path, by default None
211
+
212
+ Returns
213
+ -------
214
+ :class:`numpy.ndarray`
215
+ vector of inference times
216
+ tuple
217
+ (image width, image height)
218
+ bool
219
+ tensorflow inference flag
220
+ dict
221
+ metadata for video
222
+
223
+ Example
224
+ -------
225
+ Return a vector of inference times for 10000 frames:
226
+ dlclive.benchmark('/my/exported/model', 'my_video.avi', n_frames=10000)
227
+
228
+ Return a vector of inference times, resizing images to half the width and height for inference
229
+ dlclive.benchmark('/my/exported/model', 'my_video.avi', n_frames=10000, resize=0.5)
230
+
231
+ Display keypoints to check the accuracy of an exported model
232
+ dlclive.benchmark('/my/exported/model', 'my_video.avi', display=True)
233
+
234
+ Analyze a video (save poses to hdf5) and create a labeled video, similar to :function:`DeepLabCut.benchmark_videos` and :function:`create_labeled_video`
235
+ dlclive.benchmark('/my/exported/model', 'my_video.avi', save_poses=True, save_video=True)
236
+ """
237
+
238
+ ### load video
239
+
240
+ cap = cv2.VideoCapture(video_path)
241
+ ret, frame = cap.read()
242
+ n_frames = (
243
+ n_frames
244
+ if (n_frames > 0) and (n_frames < cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1)
245
+ else (cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1)
246
+ )
247
+ n_frames = int(n_frames)
248
+ im_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
249
+
250
+ ### get resize factor
251
+
252
+ if pixels is not None:
253
+ resize = np.sqrt(pixels / (im_size[0] * im_size[1]))
254
+ if resize is not None:
255
+ im_size = (int(im_size[0] * resize), int(im_size[1] * resize))
256
+
257
+ ### create video writer
258
+
259
+ if save_video:
260
+ colors = None
261
+ out_dir = (
262
+ output
263
+ if output is not None
264
+ else os.path.dirname(os.path.realpath(video_path))
265
+ )
266
+ out_vid_base = os.path.basename(video_path)
267
+ out_vid_file = os.path.normpath(
268
+ f"{out_dir}/{os.path.splitext(out_vid_base)[0]}_DLCLIVE_LABELED.avi"
269
+ )
270
+ fourcc = cv2.VideoWriter_fourcc(*"DIVX")
271
+ fps = cap.get(cv2.CAP_PROP_FPS)
272
+ vwriter = cv2.VideoWriter(out_vid_file, fourcc, fps, im_size)
273
+
274
+ ### check for pandas installation if using save_poses flag
275
+
276
+ if save_poses:
277
+ try:
278
+ import pandas as pd
279
+
280
+ use_pandas = True
281
+ except:
282
+ use_pandas = False
283
+ warnings.warn(
284
+ "Could not find installation of pandas; saving poses as a numpy array with the dimensions (n_frames, n_keypoints, [x, y, likelihood])."
285
+ )
286
+
287
+ ### initialize DLCLive and perform inference
288
+
289
+ inf_times = np.zeros(n_frames)
290
+ poses = []
291
+
292
+ live = DLCLive(
293
+ model_path,
294
+ tf_config=tf_config,
295
+ resize=resize,
296
+ cropping=cropping,
297
+ dynamic=dynamic,
298
+ display=display,
299
+ pcutoff=pcutoff,
300
+ display_radius=display_radius,
301
+ display_cmap=cmap,
302
+ )
303
+
304
+ poses.append(live.init_inference(frame))
305
+ TFGPUinference = True if len(live.outputs) == 1 else False
306
+
307
+ iterator = range(n_frames) if (print_rate) or (display) else tqdm(range(n_frames))
308
+ for i in iterator:
309
+
310
+ ret, frame = cap.read()
311
+
312
+ if not ret:
313
+ warnings.warn(
314
+ "Did not complete {:d} frames. There probably were not enough frames in the video {}.".format(
315
+ n_frames, video_path
316
+ )
317
+ )
318
+ break
319
+
320
+ start_pose = time.time()
321
+ poses.append(live.get_pose(frame))
322
+ inf_times[i] = time.time() - start_pose
323
+
324
+ if save_video:
325
+
326
+ if colors is None:
327
+ all_colors = getattr(cc, cmap)
328
+ colors = [
329
+ ImageColor.getcolor(c, "RGB")[::-1]
330
+ for c in all_colors[:: int(len(all_colors) / poses[-1].shape[0])]
331
+ ]
332
+
333
+ this_pose = poses[-1]
334
+ for j in range(this_pose.shape[0]):
335
+ if this_pose[j, 2] > pcutoff:
336
+ x = int(this_pose[j, 0])
337
+ y = int(this_pose[j, 1])
338
+ frame = cv2.circle(
339
+ frame, (x, y), display_radius, colors[j], thickness=-1
340
+ )
341
+
342
+ if resize is not None:
343
+ frame = cv2.resize(frame, im_size)
344
+ vwriter.write(frame)
345
+
346
+ if print_rate:
347
+ print("pose rate = {:d}".format(int(1 / inf_times[i])))
348
+
349
+ if print_rate:
350
+ print("mean pose rate = {:d}".format(int(np.mean(1 / inf_times))))
351
+
352
+ ### gather video and test parameterization
353
+
354
+ # dont want to fail here so gracefully failing on exception --
355
+ # eg. some packages of cv2 don't have CAP_PROP_CODEC_PIXEL_FORMAT
356
+ try:
357
+ fourcc = decode_fourcc(cap.get(cv2.CAP_PROP_FOURCC))
358
+ except:
359
+ fourcc = ""
360
+
361
+ try:
362
+ fps = round(cap.get(cv2.CAP_PROP_FPS))
363
+ except:
364
+ fps = None
365
+
366
+ try:
367
+ pix_fmt = decode_fourcc(cap.get(cv2.CAP_PROP_CODEC_PIXEL_FORMAT))
368
+ except:
369
+ pix_fmt = ""
370
+
371
+ try:
372
+ frame_count = round(cap.get(cv2.CAP_PROP_FRAME_COUNT))
373
+ except:
374
+ frame_count = None
375
+
376
+ try:
377
+ orig_im_size = (
378
+ round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
379
+ round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
380
+ )
381
+ except:
382
+ orig_im_size = None
383
+
384
+ meta = {
385
+ "video_path": video_path,
386
+ "video_codec": fourcc,
387
+ "video_pixel_format": pix_fmt,
388
+ "video_fps": fps,
389
+ "video_total_frames": frame_count,
390
+ "original_frame_size": orig_im_size,
391
+ "dlclive_params": live.parameterization,
392
+ }
393
+
394
+ ### close video and tensorflow session
395
+
396
+ cap.release()
397
+ live.close()
398
+
399
+ if save_video:
400
+ vwriter.release()
401
+
402
+ if save_poses:
403
+
404
+ cfg_path = os.path.normpath(f"{model_path}/pose_cfg.yaml")
405
+ ruamel_file = ruamel.yaml.YAML()
406
+ dlc_cfg = ruamel_file.load(open(cfg_path, "r"))
407
+ bodyparts = dlc_cfg["all_joints_names"]
408
+ poses = np.array(poses)
409
+
410
+ if use_pandas:
411
+
412
+ poses = poses.reshape((poses.shape[0], poses.shape[1] * poses.shape[2]))
413
+ pdindex = pd.MultiIndex.from_product(
414
+ [bodyparts, ["x", "y", "likelihood"]], names=["bodyparts", "coords"]
415
+ )
416
+ pose_df = pd.DataFrame(poses, columns=pdindex)
417
+
418
+ out_dir = (
419
+ output
420
+ if output is not None
421
+ else os.path.dirname(os.path.realpath(video_path))
422
+ )
423
+ out_vid_base = os.path.basename(video_path)
424
+ out_dlc_file = os.path.normpath(
425
+ f"{out_dir}/{os.path.splitext(out_vid_base)[0]}_DLCLIVE_POSES.h5"
426
+ )
427
+ pose_df.to_hdf(out_dlc_file, key="df_with_missing", mode="w")
428
+
429
+ else:
430
+
431
+ out_vid_base = os.path.basename(video_path)
432
+ out_dlc_file = os.path.normpath(
433
+ f"{out_dir}/{os.path.splitext(out_vid_base)[0]}_DLCLIVE_POSES.npy"
434
+ )
435
+ np.save(out_dlc_file, poses)
436
+
437
+ return inf_times, im_size, TFGPUinference, meta
438
+
439
+
440
+ def save_inf_times(
441
+ sys_info, inf_times, im_size, TFGPUinference, model=None, meta=None, output=None
442
+ ):
443
+ """ Save inference time data collected using :function:`benchmark` with system information to a pickle file.
444
+ This is primarily used through :function:`benchmark_videos`
445
+
446
+
447
+ Parameters
448
+ ----------
449
+ sys_info : tuple
450
+ system information generated by :func:`get_system_info`
451
+ inf_times : :class:`numpy.ndarray`
452
+ array of inference times generated by :func:`benchmark`
453
+ im_size : tuple or :class:`numpy.ndarray`
454
+ image size (width, height) for each benchmark run. If an array, each row corresponds to a row in inf_times
455
+ TFGPUinference: bool
456
+ flag if using tensorflow inference or numpy inference DLC model
457
+ model: str, optional
458
+ name of model
459
+ meta : dict, optional
460
+ metadata returned by :func:`benchmark`
461
+ output : str, optional
462
+ path to directory to save data. If None, uses pwd, by default None
463
+
464
+ Returns
465
+ -------
466
+ bool
467
+ flag indicating successful save
468
+ """
469
+
470
+ output = output if output is not None else os.getcwd()
471
+ model_type = None
472
+ if model is not None:
473
+ if "resnet" in model:
474
+ model_type = "resnet"
475
+ elif "mobilenet" in model:
476
+ model_type = "mobilenet"
477
+ else:
478
+ model_type = None
479
+
480
+ fn_ind = 0
481
+ base_name = (
482
+ f"benchmark_{sys_info['host_name']}_{sys_info['device_type']}_{fn_ind}.pickle"
483
+ )
484
+ out_file = os.path.normpath(f"{output}/{base_name}")
485
+ while os.path.isfile(out_file):
486
+ fn_ind += 1
487
+ base_name = f"benchmark_{sys_info['host_name']}_{sys_info['device_type']}_{fn_ind}.pickle"
488
+ out_file = os.path.normpath(f"{output}/{base_name}")
489
+
490
+ # summary stats (mean inference time & standard error of mean)
491
+ # materialize as a list so the summary stats can be pickled along with the raw data
+ stats = list(zip(
492
+ np.mean(inf_times, 1),
493
+ np.std(inf_times, 1) * 1.0 / np.sqrt(np.shape(inf_times)[1]),
494
+ ))
495
+
496
+ # for stat in stats:
497
+ # print("Stats:", stat)
498
+
499
+ data = {
500
+ "model": model,
501
+ "model_type": model_type,
502
+ "TFGPUinference": TFGPUinference,
503
+ "im_size": im_size,
504
+ "inference_times": inf_times,
505
+ "stats": stats,
506
+ }
507
+
508
+ data.update(sys_info)
509
+ if meta:
510
+ data.update(meta)
511
+
512
+ os.makedirs(os.path.normpath(output), exist_ok=True)
513
+ pickle.dump(data, open(out_file, "wb"))
514
+
515
+ return True
516
+
517
+
518
+ def benchmark_videos(
519
+ model_path,
520
+ video_path,
521
+ output=None,
522
+ n_frames=1000,
523
+ tf_config=None,
524
+ resize=None,
525
+ pixels=None,
526
+ cropping=None,
527
+ dynamic=(False, 0.5, 10),
528
+ print_rate=False,
529
+ display=False,
530
+ pcutoff=0.5,
531
+ display_radius=3,
532
+ cmap="bmy",
533
+ save_poses=False,
534
+ save_video=False,
535
+ ):
536
+ """Analyze videos using DeepLabCut-live exported models.
537
+ Analyze multiple videos and/or multiple options for the size of the video
538
+ by specifying a resizing factor or the number of pixels to use in the image (keeping aspect ratio constant).
539
+ Options to record inference times (to examine inference speed),
540
+ display keypoints to visually check the accuracy,
541
+ or save poses to an hdf5 file as in :function:`deeplabcut.benchmark_videos` and
542
+ create a labeled video as in :function:`deeplabcut.create_labeled_video`.
543
+
544
+ Parameters
545
+ ----------
546
+ model_path : str
547
+ path to exported DeepLabCut model
548
+ video_path : str or list
549
+ path to video file or list of paths to video files
550
+ output : str
551
+ path to directory to save results
552
+ tf_config : :class:`tensorflow.ConfigProto`
553
+ tensorflow session configuration
554
+ resize : int, optional
555
+ resize factor. Can only use one of resize or pixels. If both are provided, will use pixels. by default None
556
+ pixels : int, optional
557
+ downsize image to this number of pixels, maintaining aspect ratio. Can only use one of resize or pixels. If both are provided, will use pixels. by default None
558
+ cropping : list of int
559
+ cropping parameters in pixel number: [x1, x2, y1, y2]
560
+ dynamic: triple containing (state, detection threshold, margin)
+ If the state is true, dynamic cropping is performed: once an object is detected (i.e. any body part likelihood > detection threshold),
+ object boundaries are computed from the smallest/largest x and y positions of all body parts. This window is expanded by the margin,
+ and from then on only the posture within this crop is analyzed (until the object is lost, i.e. all likelihoods < detection threshold).
+ The current position is used to update the crop window for the next frame, which is why the margin should be set large enough
+ given the movement of the animal.
566
+ n_frames : int, optional
567
+ number of frames to run inference on, by default 1000
568
+ print_rate : bool, optional
569
+ flat to print inference rate frame by frame, by default False
570
+ display : bool, optional
571
+ flag to display keypoints on images. Useful for checking the accuracy of exported models.
572
+ pcutoff : float, optional
573
+ likelihood threshold to display keypoints
574
+ display_radius : int, optional
575
+ size (radius in pixels) of keypoint to display
576
+ cmap : str, optional
577
+ a string indicating the :package:`colorcet` colormap, `options here <https://colorcet.holoviz.org/>`, by default "bmy"
578
+ save_poses : bool, optional
579
+ flag to save poses to an hdf5 file. If True, operates similar to :function:`DeepLabCut.benchmark_videos`, by default False
580
+ save_video : bool, optional
581
+ flag to save a labeled video. If True, operates similar to :function:`DeepLabCut.create_labeled_video`, by default False
582
+
583
+ Example
584
+ -------
585
+ Return a vector of inference times for 10000 frames on one video or two videos:
586
+ dlclive.benchmark_videos('/my/exported/model', 'my_video.avi', n_frames=10000)
587
+ dlclive.benchmark_videos('/my/exported/model', ['my_video1.avi', 'my_video2.avi'], n_frames=10000)
588
+
589
+ Return a vector of inference times, testing full size and resizing images to half the width and height for inference, for two videos
590
+ dlclive.benchmark_videos('/my/exported/model', ['my_video1.avi', 'my_video2.avi'], n_frames=10000, resize=[1.0, 0.5])
591
+
592
+ Display keypoints to check the accuracy of an exported model
593
+ dlclive.benchmark_videos('/my/exported/model', 'my_video.avi', display=True)
594
+
595
+ Analyze a video (save poses to hdf5) and create a labeled video, similar to :function:`DeepLabCut.benchmark_videos` and :function:`create_labeled_video`
596
+ dlclive.benchmark_videos('/my/exported/model', 'my_video.avi', save_poses=True, save_video=True)
597
+ """
598
+
599
+ # convert video_paths to list
600
+
601
+ video_path = video_path if type(video_path) is list else [video_path]
602
+
603
+ # fix resize
604
+
605
+ if pixels:
606
+ pixels = pixels if type(pixels) is list else [pixels]
607
+ resize = [None for p in pixels]
608
+ elif resize:
609
+ resize = resize if type(resize) is list else [resize]
610
+ pixels = [None for r in resize]
611
+ else:
612
+ resize = [None]
613
+ pixels = [None]
614
+
615
+ # loop over videos
616
+
617
+ for v in video_path:
618
+
619
+ # initialize full inference times
620
+
621
+ inf_times = []
622
+ im_size_out = []
623
+
624
+ for i in range(len(resize)):
625
+
626
+ print(f"\nRun {i+1} / {len(resize)}\n")
627
+
628
+ this_inf_times, this_im_size, TFGPUinference, meta = benchmark(
629
+ model_path,
630
+ v,
631
+ tf_config=tf_config,
632
+ resize=resize[i],
633
+ pixels=pixels[i],
634
+ cropping=cropping,
635
+ dynamic=dynamic,
636
+ n_frames=n_frames,
637
+ print_rate=print_rate,
638
+ display=display,
639
+ pcutoff=pcutoff,
640
+ display_radius=display_radius,
641
+ cmap=cmap,
642
+ save_poses=save_poses,
643
+ save_video=save_video,
644
+ output=output,
645
+ )
646
+
647
+ inf_times.append(this_inf_times)
648
+ im_size_out.append(this_im_size)
649
+
650
+ inf_times = np.array(inf_times)
651
+ im_size_out = np.array(im_size_out)
652
+
653
+ # save results
654
+
655
+ if output is not None:
656
+ sys_info = get_system_info()
657
+ save_inf_times(
658
+ sys_info,
659
+ inf_times,
660
+ im_size_out,
661
+ TFGPUinference,
662
+ model=os.path.basename(model_path),
663
+ meta=meta,
664
+ output=output,
665
+ )
666
+
667
+
668
+ def main():
669
+ """Provides a command line interface :function:`benchmark_videos`
670
+ """
671
+
672
+ import argparse
673
+
674
+ parser = argparse.ArgumentParser()
675
+ parser.add_argument("model_path", type=str)
676
+ parser.add_argument("video_path", type=str, nargs="+")
677
+ parser.add_argument("-o", "--output", type=str, default=None)
678
+ parser.add_argument("-n", "--n-frames", type=int, default=1000)
679
+ parser.add_argument("-r", "--resize", type=float, nargs="+")
680
+ parser.add_argument("-p", "--pixels", type=float, nargs="+")
681
+ parser.add_argument("-v", "--print-rate", default=False, action="store_true")
682
+ parser.add_argument("-d", "--display", default=False, action="store_true")
683
+ parser.add_argument("-l", "--pcutoff", default=0.5, type=float)
684
+ parser.add_argument("-s", "--display-radius", default=3, type=int)
685
+ parser.add_argument("-c", "--cmap", type=str, default="bmy")
686
+ parser.add_argument("--cropping", nargs="+", type=int, default=None)
687
+ parser.add_argument("--dynamic", nargs="+", type=float, default=[])
688
+ parser.add_argument("--save-poses", action="store_true")
689
+ parser.add_argument("--save-video", action="store_true")
690
+ args = parser.parse_args()
691
+
692
+ if (args.cropping) and (len(args.cropping) < 4):
693
+ raise Exception(
694
+ "Cropping not properly specified. Must provide 4 values: x1, x2, y1, y2"
695
+ )
696
+
697
+ if not args.dynamic:
698
+ args.dynamic = (False, 0.5, 10)
699
+ elif len(args.dynamic) < 3:
700
+ raise Exception(
701
+ "Dynamic cropping not properly specified. Must provide three values: 0 or 1 as boolean flag, pcutoff, and margin"
702
+ )
703
+ else:
704
+ args.dynamic = (bool(args.dynamic[0]), args.dynamic[1], args.dynamic[2])
705
+
706
+ benchmark_videos(
707
+ args.model_path,
708
+ args.video_path,
709
+ output=args.output,
710
+ resize=args.resize,
711
+ pixels=args.pixels,
712
+ cropping=args.cropping,
713
+ dynamic=args.dynamic,
714
+ n_frames=args.n_frames,
715
+ print_rate=args.print_rate,
716
+ display=args.display,
717
+ pcutoff=args.pcutoff,
718
+ display_radius=args.display_radius,
719
+ cmap=args.cmap,
720
+ save_poses=args.save_poses,
721
+ save_video=args.save_video,
722
+ )
723
+
724
+
725
+ if __name__ == "__main__":
726
+ main()
Repositories/DeepLabCut-live/dlclive/check_install/check_install.py ADDED
@@ -0,0 +1,88 @@
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+
9
+ import sys
10
+ import shutil
11
+ import warnings
12
+
13
+ from dlclive import benchmark_videos
14
+ import urllib.request
15
+ import argparse
16
+ from pathlib import Path
17
+ from dlclibrary.dlcmodelzoo.modelzoo_download import (
18
+ download_huggingface_model,
19
+ )
20
+
21
+
22
+ def urllib_pbar(count, blockSize, totalSize):
23
+ percent = int(count * blockSize * 100 / totalSize)
24
+ outstr = f"{round(percent)}%"
25
+ sys.stdout.write(outstr)
26
+ sys.stdout.write("\b"*len(outstr))
27
+ sys.stdout.flush()
28
+
29
+
30
+ def main(display:bool=None):
31
+ parser = argparse.ArgumentParser(
32
+ description="Test DLC-Live installation by downloading and evaluating a demo DLC project!")
33
+ parser.add_argument('--nodisplay', action='store_false', help="Run the test without displaying tracking")
34
+ args = parser.parse_args()
35
+
36
+ if display is None:
37
+ display = args.nodisplay
38
+
39
+ if not display:
40
+ print('Running without displaying video')
41
+
42
+ # make temporary directory in $HOME
43
+ print("\nCreating temporary directory...\n")
44
+ tmp_dir = Path().home() / 'dlc-live-tmp'
45
+ tmp_dir.mkdir(mode=0o775,exist_ok=True)
46
+
47
+ video_file = str(tmp_dir / 'dog_clip.avi')
48
+ model_dir = tmp_dir / 'DLC_Dog_resnet_50_iteration-0_shuffle-0'
49
+
50
+ # download dog test video from github:
51
+ print(f"Downloading Video to {video_file}")
52
+ url_link = "https://github.com/DeepLabCut/DeepLabCut-live/blob/master/check_install/dog_clip.avi?raw=True"
53
+ urllib.request.urlretrieve(url_link, video_file, reporthook=urllib_pbar)
54
+
55
+ # download exported dog model from DeepLabCut Model Zoo
56
+ if Path(model_dir / 'snapshot-75000.pb').exists():
57
+ print('Model already downloaded, using cached version')
58
+ else:
59
+ print("Downloading full_dog model from the DeepLabCut Model Zoo...")
60
+ download_huggingface_model("full_dog", model_dir)
61
+
62
+ # assert these things exist so we can give informative error messages
63
+ assert Path(video_file).exists()
64
+ assert Path(model_dir / 'snapshot-75000.pb').exists()
65
+
66
+ # run benchmark videos
67
+ print("\n Running inference...\n")
68
+ # model_dir = "DLC_Dog_resnet_50_iteration-0_shuffle-0"
69
+ # print(video_file)
70
+ benchmark_videos(str(model_dir), video_file, display=display, resize=0.5, pcutoff=0.25)
71
+
72
+ # deleting temporary files
73
+ print("\n Deleting temporary files...\n")
74
+ try:
75
+ shutil.rmtree(tmp_dir)
76
+ except PermissionError:
77
+ warnings.warn(f'Could not delete temporary directory {str(tmp_dir)} due to a permissions error, but otherwise dlc-live seems to be working fine!')
78
+
79
+ print("\nDone!\n")
80
+
81
+
82
+ if __name__ == "__main__":
83
+
+ # `main` parses its own command line arguments, so it is simply called here
+ main()
Repositories/DeepLabCut-live/dlclive/display.py ADDED
@@ -0,0 +1,117 @@
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+
9
+ from tkinter import Tk, Label
10
+ import colorcet as cc
11
+ from PIL import Image, ImageTk, ImageDraw
12
+
13
+
14
+ class Display(object):
15
+ """
16
+ Simple object to display frames with DLC labels.
17
+
18
+ Parameters
19
+ -----------
20
+ cmap : string
21
+ string indicating the colorcet colormap to use.
22
+ pcutoff : float
23
+ likelihood threshold to display points
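+
+ Example
+ -------
+ A minimal usage sketch (requires a display for the tkinter window; the frame and pose below are dummy placeholders):
+
+ >>> import numpy as np
+ >>> frame = np.zeros((480, 640, 3), dtype=np.uint8) # dummy image
+ >>> pose = np.array([[320.0, 240.0, 0.9]]) # one keypoint: x, y, likelihood
+ >>> disp = Display(cmap="bmy", radius=3, pcutoff=0.5)
+ >>> disp.display_frame(frame, pose)
+ >>> disp.destroy()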
24
+ """
25
+
26
+ def __init__(self, cmap="bmy", radius=3, pcutoff=0.5):
27
+ """ Constructor method
28
+ """
29
+
30
+ self.cmap = cmap
31
+ self.colors = None
32
+ self.radius = radius
33
+ self.pcutoff = pcutoff
34
+ self.window = None
35
+
36
+ def set_display(self, im_size, bodyparts):
37
+ """ Create tkinter window to display image
38
+
39
+ Parameters
40
+ ----------
41
+ im_size : tuple
42
+ (width, height) of image
43
+ bodyparts : int
44
+ number of bodyparts
45
+ """
46
+
47
+ self.window = Tk()
48
+ self.window.title("DLC Live")
49
+ self.lab = Label(self.window)
50
+ self.lab.pack()
51
+
52
+ all_colors = getattr(cc, self.cmap)
53
+ self.colors = all_colors[:: int(len(all_colors) / bodyparts)]
54
+
55
+ def display_frame(self, frame, pose=None):
56
+ """
57
+ Display the image with DeepLabCut keypoints in a tkinter window
58
+
59
+ Parameters
60
+ -----------
61
+ frame :class:`numpy.ndarray`
62
+ an image as a numpy array
63
+
64
+ pose :class:`numpy.ndarray`
65
+ the pose estimated by DeepLabCut for the image
66
+ """
67
+
68
+ im_size = (frame.shape[1], frame.shape[0])
69
+
70
+ if pose is not None:
71
+
72
+ if self.window is None:
73
+ self.set_display(im_size, pose.shape[0])
74
+
75
+ img = Image.fromarray(frame)
76
+ draw = ImageDraw.Draw(img)
77
+
78
+ for i in range(pose.shape[0]):
79
+ if pose[i, 2] > self.pcutoff:
80
+ try:
81
+ x0 = (
82
+ pose[i, 0] - self.radius
83
+ if pose[i, 0] - self.radius > 0
84
+ else 0
85
+ )
86
+ x1 = (
87
+ pose[i, 0] + self.radius
88
+ if pose[i, 0] + self.radius < im_size[0]
89
+ else im_size[0]
90
+ )
91
+ y0 = (
92
+ pose[i, 1] - self.radius
93
+ if pose[i, 1] - self.radius > 0
94
+ else 0
95
+ )
96
+ y1 = (
97
+ pose[i, 1] + self.radius
98
+ if pose[i, 1] + self.radius < im_size[1]
99
+ else im_size[1]
100
+ )
101
+ coords = [x0, y0, x1, y1]
102
+ draw.ellipse(
103
+ coords, fill=self.colors[i], outline=self.colors[i]
104
+ )
105
+ except Exception as e:
106
+ print(e)
107
+
108
+ img_tk = ImageTk.PhotoImage(image=img, master=self.window)
109
+ self.lab.configure(image=img_tk)
110
+ self.window.update()
111
+
112
+ def destroy(self):
113
+ """
114
+ Destroys the tkinter window used for display
115
+ """
116
+
117
+ self.window.destroy()
Repositories/DeepLabCut-live/dlclive/dlclive.py ADDED
@@ -0,0 +1,480 @@
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+ import os
9
+ import ruamel.yaml
10
+ import glob
11
+ import warnings
12
+ import numpy as np
13
+ import tensorflow as tf
14
+ import typing
15
+ from pathlib import Path
16
+ from typing import Optional, Tuple, List
17
+
18
+ try:
19
+ TFVER = [int(v) for v in tf.__version__.split(".")]
20
+ if TFVER[1] < 14:
21
+ from tensorflow.contrib.tensorrt import trt_convert as trt
22
+ else:
23
+ from tensorflow.python.compiler.tensorrt import trt_convert as trt
24
+ except Exception:
25
+ pass
26
+
27
+ from dlclive.graph import (
28
+ read_graph,
29
+ finalize_graph,
30
+ get_output_nodes,
31
+ get_output_tensors,
32
+ extract_graph,
33
+ )
34
+ from dlclive.pose import extract_cnn_output, argmax_pose_predict, multi_pose_predict
35
+ from dlclive.display import Display
36
+ from dlclive import utils
37
+ from dlclive.exceptions import DLCLiveError, DLCLiveWarning
38
+ if typing.TYPE_CHECKING:
39
+ from dlclive.processor import Processor
40
+
41
+ class DLCLive(object):
42
+ """
43
+ Object that loads a DLC network and performs inference on single images (e.g. images captured from a camera feed)
44
+
45
+ Parameters
46
+ -----------
47
+
48
+ path : string
49
+ Full path to exported model directory
50
+
51
+ model_type: string, optional
52
+ which model to use: 'base', 'tensorrt' for a tensorrt optimized graph, or 'tflite' for a tensorflow lite optimized graph
53
+
54
+ precision : string, optional
55
+ precision of model weights, only for model_type='tensorrt'. Can be 'FP16' (default), 'FP32', or 'INT8'
56
+
57
+ cropping : list of int
58
+ cropping parameters in pixel number: [x1, x2, y1, y2]
59
+
60
+ dynamic: triple containing (state, detection threshold, margin)
+ If the state is true, dynamic cropping is performed: once an object is detected (i.e. any body part likelihood > detection threshold),
+ object boundaries are computed from the smallest/largest x and y positions of all body parts. This window is expanded by the margin,
+ and from then on only the posture within this crop is analyzed (until the object is lost, i.e. all likelihoods < detection threshold).
+ The current position is used to update the crop window for the next frame, which is why the margin should be set large enough
+ given the movement of the animal.
66
+
67
+ resize : float, optional
68
+ Factor to resize the image.
69
+ For example, resize=0.5 will downsize both the height and width of the image by a factor of 2.
70
+
71
+ processor: dlc pose processor object, optional
72
+ User-defined processor object. Must contain two methods: process and save.
73
+ The 'process' method takes in a pose, performs some processing, and returns processed pose.
74
+ The 'save' method saves any valuable data created by or used by the processor
75
+ Processors can be used for two main purposes:
76
+ i) to run a forward predicting model that will predict the future pose from past history of poses (history can be stored in the processor object, but is not stored in this DLCLive object)
77
+ ii) to trigger external hardware based on pose estimation (e.g. see 'TeensyLaser' processor)
78
+
79
+ convert2rgb : bool, optional
80
+ boolean flag to convert frames from BGR to RGB color scheme
81
+
82
+ display : bool, optional
83
+ Display frames with DeepLabCut labels?
84
+ This is useful for testing model accuracy and cropping parameters, but it is very slow.
85
+
86
+ pcutoff : float, optional
+ likelihood threshold for displaying keypoints, default=0.5
+
+ display_radius : int, optional
+ radius for keypoint display in pixels, default=3
+
+ display_cmap : str, optional
+ colorcet colormap used for keypoint display, default="bmy"
90
+ radius for keypoint display in pixels, default=3
91
+ """
92
+
93
+ PARAMETERS = (
94
+ "path",
95
+ "cfg",
96
+ "model_type",
97
+ "precision",
98
+ "cropping",
99
+ "dynamic",
100
+ "resize",
101
+ "processor",
102
+ )
103
+
104
+ def __init__(
105
+ self,
106
+ model_path:str,
107
+ model_type:str="base",
108
+ precision:str="FP32",
109
+ tf_config=None,
110
+ cropping:Optional[List[int]]=None,
111
+ dynamic:Tuple[bool, float, float]=(False, 0.5, 10),
112
+ resize:Optional[float]=None,
113
+ convert2rgb:bool=True,
114
+ processor:Optional['Processor']=None,
115
+ display:typing.Union[bool, Display]=False,
116
+ pcutoff:float=0.5,
117
+ display_radius:int=3,
118
+ display_cmap:str="bmy",
119
+ ):
120
+
121
+ self.path = model_path
122
+ self.cfg = None # type: typing.Optional[dict]
123
+ self.model_type = model_type
124
+ self.tf_config = tf_config
125
+ self.precision = precision
126
+ self.cropping = cropping
127
+ self.dynamic = dynamic
128
+ self.dynamic_cropping = None
129
+ self.resize = resize
130
+ self.processor = processor
131
+ self.convert2rgb = convert2rgb
132
+ if isinstance(display, Display):
133
+ self.display = display
134
+ elif display:
135
+ self.display = Display(pcutoff=pcutoff, radius=display_radius, cmap=display_cmap)
136
+ else:
137
+ self.display = None
138
+
139
+ self.sess = None
140
+ self.inputs = None
141
+ self.outputs = None
142
+ self.tflite_interpreter = None
143
+ self.pose = None
144
+ self.is_initialized = False
145
+
146
+ # checks
147
+
148
+ if self.model_type == "tflite" and self.dynamic[0]:
149
+ self.dynamic = (False, *self.dynamic[1:])
150
+ warnings.warn(
151
+ "Dynamic cropping is not supported for tensorflow lite inference. Dynamic cropping will not be used...",
152
+ DLCLiveWarning,
153
+ )
154
+
155
+ self.read_config()
156
+
157
+ def read_config(self):
158
+ """ Reads configuration yaml file
159
+
160
+ Raises
161
+ ------
162
+ FileNotFoundError
163
+ error thrown if the pose configuration file does not exist
164
+ """
165
+
166
+ cfg_path = Path(self.path).resolve() / "pose_cfg.yaml"
167
+ if not cfg_path.exists():
168
+ raise FileNotFoundError(
169
+ f"The pose configuration file for the exported model at {str(cfg_path)} was not found. Please check the path to the exported model directory"
170
+ )
171
+
172
+ ruamel_file = ruamel.yaml.YAML()
173
+ self.cfg = ruamel_file.load(open(str(cfg_path), "r"))
174
+
175
+ @property
176
+ def parameterization(self) -> dict:
177
+ """
178
+ Return the DLCLive parameters listed in :attr:`PARAMETERS` as a dictionary.
+
+ Returns
+ -------
+ dict
+     parameter names mapped to their current values
181
+ """
182
+ return {param: getattr(self, param) for param in self.PARAMETERS}
183
+
184
+ def process_frame(self, frame):
185
+ """
186
+ Crops an image according to the object's cropping and dynamic properties.
187
+
188
+ Parameters
189
+ -----------
190
+ frame :class:`numpy.ndarray`
191
+ image as a numpy array
192
+
193
+ Returns
194
+ ----------
195
+ frame :class:`numpy.ndarray`
196
+ processed frame: convert type, crop, convert color
197
+ """
198
+
199
+ if frame.dtype != np.uint8:
200
+
201
+ frame = utils.convert_to_ubyte(frame)
202
+
203
+ if self.cropping:
204
+
205
+ frame = frame[
206
+ self.cropping[2] : self.cropping[3], self.cropping[0] : self.cropping[1]
207
+ ]
208
+
209
+ if self.dynamic[0]:
210
+
211
+ if self.pose is not None:
212
+
213
+ detected = self.pose[:, 2] > self.dynamic[1]
214
+
215
+ if np.any(detected):
216
+
217
+ x = self.pose[detected, 0]
218
+ y = self.pose[detected, 1]
219
+
220
+ x1 = int(max([0, int(np.amin(x)) - self.dynamic[2]]))
221
+ x2 = int(min([frame.shape[1], int(np.amax(x)) + self.dynamic[2]]))
222
+ y1 = int(max([0, int(np.amin(y)) - self.dynamic[2]]))
223
+ y2 = int(min([frame.shape[0], int(np.amax(y)) + self.dynamic[2]]))
224
+ self.dynamic_cropping = [x1, x2, y1, y2]
225
+
226
+ frame = frame[y1:y2, x1:x2]
227
+
228
+ else:
229
+
230
+ self.dynamic_cropping = None
231
+
232
+ if self.resize != 1:
233
+ frame = utils.resize_frame(frame, self.resize)
234
+
235
+ if self.convert2rgb:
236
+ frame = utils.img_to_rgb(frame)
237
+
238
+ return frame
239
+
240
+ def init_inference(self, frame=None, **kwargs):
241
+ """
242
+ Load model and perform inference on first frame -- the first inference is usually very slow.
243
+
244
+ Parameters
245
+ -----------
246
+ frame :class:`numpy.ndarray`
247
+ image as a numpy array
248
+
249
+ Returns
250
+ --------
251
+ pose :class:`numpy.ndarray`
252
+ the pose estimated by DeepLabCut for the input image
253
+ """
254
+
255
+ # get model file
256
+
257
+ model_paths = glob.glob(os.path.normpath(self.path + "/*.pb"))
+ if not model_paths:
+     raise FileNotFoundError(
+         "No model file (*.pb) was found in the exported model directory {}.".format(self.path)
+     )
+ model_file = model_paths[0]
262
+
263
+ # process frame
264
+
265
+ if frame is None and (self.model_type == "tflite"):
266
+ raise DLCLiveError(
267
+ "No image was passed to initialize inference. An image must be passed to the init_inference method"
268
+ )
269
+
270
+ if frame is not None:
271
+ if frame.ndim == 2:
272
+ self.convert2rgb = True
273
+ processed_frame = self.process_frame(frame)
274
+
275
+ # load model
276
+
277
+ if self.model_type == "base":
278
+
279
+ graph_def = read_graph(model_file)
280
+ graph = finalize_graph(graph_def)
281
+ self.sess, self.inputs, self.outputs = extract_graph(
282
+ graph, tf_config=self.tf_config
283
+ )
284
+
285
+ elif self.model_type == "tflite":
286
+
287
+ ###
288
+ # a frame is needed here to fix the input size, because
+ # tflite does not support saving a model with a dynamic input size
290
+ ###
291
+
292
+ # get input and output tensor names from graph_def
293
+ graph_def = read_graph(model_file)
294
+ graph = finalize_graph(graph_def)
295
+ output_nodes = get_output_nodes(graph)
296
+ output_nodes = [on.replace("DLC/", "") for on in output_nodes]
297
+
298
+ tf_version_2 = tf.__version__[0] == '2'
299
+
300
+ if tf_version_2:
301
+ converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
302
+ model_file,
303
+ ["Placeholder"],
304
+ output_nodes,
305
+ input_shapes={"Placeholder": [1, processed_frame.shape[0], processed_frame.shape[1], 3]},
306
+ )
307
+ else:
308
+ converter = tf.lite.TFLiteConverter.from_frozen_graph(
309
+ model_file,
310
+ ["Placeholder"],
311
+ output_nodes,
312
+ input_shapes={"Placeholder": [1, processed_frame.shape[0], processed_frame.shape[1], 3]},
313
+ )
314
+
315
+ try:
316
+ tflite_model = converter.convert()
317
+ except Exception:
318
+ raise DLCLiveError(
319
+ (
320
+ "This model cannot be converted to tensorflow lite format. "
321
+ "To use tensorflow lite for live inference, "
322
+ "make sure to set TFGPUinference=False "
323
+ "when exporting the model from DeepLabCut"
324
+ )
325
+ )
326
+
327
+ self.tflite_interpreter = tf.lite.Interpreter(model_content=tflite_model)
328
+ self.tflite_interpreter.allocate_tensors()
329
+ self.inputs = self.tflite_interpreter.get_input_details()
330
+ self.outputs = self.tflite_interpreter.get_output_details()
331
+
332
+ elif self.model_type == "tensorrt":
333
+
334
+ graph_def = read_graph(model_file)
335
+ graph = finalize_graph(graph_def)
336
+ output_tensors = get_output_tensors(graph)
337
+ output_tensors = [ot.replace("DLC/", "") for ot in output_tensors]
338
+
339
+ if (TFVER[0] > 1) or (TFVER[0] == 1 and TFVER[1] >= 14):
340
+ converter = trt.TrtGraphConverter(
341
+ input_graph_def=graph_def,
342
+ nodes_blacklist=output_tensors,
343
+ is_dynamic_op=True,
344
+ )
345
+ graph_def = converter.convert()
346
+ else:
347
+ graph_def = trt.create_inference_graph(
348
+ input_graph_def=graph_def,
349
+ outputs=output_tensors,
350
+ max_batch_size=1,
351
+ precision_mode=self.precision,
352
+ is_dynamic_op=True,
353
+ )
354
+
355
+ graph = finalize_graph(graph_def)
356
+ self.sess, self.inputs, self.outputs = extract_graph(
357
+ graph, tf_config=self.tf_config
358
+ )
359
+
360
+ else:
361
+
362
+ raise DLCLiveError(
363
+ "model_type = {} is not supported. model_type must be 'base', 'tflite', or 'tensorrt'".format(
364
+ self.model_type
365
+ )
366
+ )
367
+
368
+ # get pose of first frame (first inference is often very slow)
369
+
370
+ if frame is not None:
371
+ pose = self.get_pose(frame, **kwargs)
372
+ else:
373
+ pose = None
374
+
375
+ self.is_initialized = True
376
+
377
+ return pose
378
+
379
+ def get_pose(self, frame=None, **kwargs):
380
+ """
381
+ Get the pose of an image
382
+
383
+ Parameters
384
+ -----------
385
+ frame :class:`numpy.ndarray`
386
+ image as a numpy array
387
+
388
+ Returns
389
+ --------
390
+ pose :class:`numpy.ndarray`
391
+ the pose estimated by DeepLabCut for the input image
392
+ """
393
+
394
+ if frame is None:
395
+ raise DLCLiveError("No frame provided for live pose estimation")
396
+
397
+ frame = self.process_frame(frame)
398
+
399
+ if self.model_type in ["base", "tensorrt"]:
400
+
401
+ pose_output = self.sess.run(
402
+ self.outputs, feed_dict={self.inputs: np.expand_dims(frame, axis=0)}
403
+ )
404
+
405
+ elif self.model_type == "tflite":
406
+
407
+ self.tflite_interpreter.set_tensor(
408
+ self.inputs[0]["index"],
409
+ np.expand_dims(frame, axis=0).astype(np.float32),
410
+ )
411
+ self.tflite_interpreter.invoke()
412
+
413
+ if len(self.outputs) > 1:
414
+ pose_output = [
415
+ self.tflite_interpreter.get_tensor(self.outputs[0]["index"]),
416
+ self.tflite_interpreter.get_tensor(self.outputs[1]["index"]),
417
+ ]
418
+ else:
419
+ pose_output = self.tflite_interpreter.get_tensor(
420
+ self.outputs[0]["index"]
421
+ )
422
+
423
+ else:
424
+
425
+ raise DLCLiveError(
426
+ "model_type = {} is not supported. model_type must be 'base', 'tflite', or 'tensorrt'".format(
427
+ self.model_type
428
+ )
429
+ )
430
+
431
+ # check if using TFGPUinference flag
432
+ # if not, get pose from network output
433
+
434
+ if len(pose_output) > 1:
435
+ scmap, locref = extract_cnn_output(pose_output, self.cfg)
436
+ num_outputs = self.cfg.get("num_outputs", 1)
437
+ if num_outputs > 1:
438
+ self.pose = multi_pose_predict(
439
+ scmap, locref, self.cfg["stride"], num_outputs
440
+ )
441
+ else:
442
+ self.pose = argmax_pose_predict(scmap, locref, self.cfg["stride"])
443
+ else:
444
+ pose = np.array(pose_output[0])
445
+ self.pose = pose[:, [1, 0, 2]]
446
+
447
+ # display image if display=True before correcting pose for cropping/resizing
448
+
449
+ if self.display is not None:
450
+ self.display.display_frame(frame, self.pose)
451
+
452
+ # if the frame was resized or cropped, convert pose coordinates back to the original frame coordinates
453
+
454
+ if self.resize is not None:
455
+ self.pose[:, :2] *= 1 / self.resize
456
+
457
+ if self.cropping is not None:
458
+ self.pose[:, 0] += self.cropping[0]
459
+ self.pose[:, 1] += self.cropping[2]
460
+
461
+ if self.dynamic_cropping is not None:
462
+ self.pose[:, 0] += self.dynamic_cropping[0]
463
+ self.pose[:, 1] += self.dynamic_cropping[2]
464
+
465
+ # process the pose
466
+
467
+ if self.processor:
468
+ self.pose = self.processor.process(self.pose, **kwargs)
469
+
470
+ return self.pose
471
+
472
+ def close(self):
473
+ """ Close tensorflow session
474
+ """
475
+
476
+ if self.sess is not None:
+     self.sess.close()
477
+ self.sess = None
478
+ self.is_initialized = False
479
+ if self.display is not None:
480
+ self.display.destroy()
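For orientation, a minimal usage sketch of the `DLCLive` class defined above (the exported-model path and camera index are placeholders, not files included in this commit):

```
from dlclive import DLCLive, Processor
import cv2

dlc_live = DLCLive("/path/to/exported_model", model_type="base", resize=0.5, processor=Processor())

cap = cv2.VideoCapture(0)            # placeholder camera index
ret, frame = cap.read()
dlc_live.init_inference(frame)       # loads the graph; the first inference is slow

while True:
    ret, frame = cap.read()
    if not ret:
        break
    pose = dlc_live.get_pose(frame)  # (n_bodyparts, 3) array of x, y, likelihood

cap.release()
dlc_live.close()
```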
Repositories/DeepLabCut-live/dlclive/exceptions.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+
9
+ class DLCLiveError(Exception):
10
+ """ Generic error type for incorrect use of the DLCLive class """
11
+
12
+ pass
13
+
14
+
15
+ class DLCLiveWarning(Warning):
16
+ """ Generic warning for incorrect use of the DLCLive class """
17
+
18
+ pass
Repositories/DeepLabCut-live/dlclive/graph.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+
9
+ import tensorflow as tf
10
+
11
+ vers = (tf.__version__).split(".")
12
+ if int(vers[0]) == 2 or (int(vers[0]) == 1 and int(vers[1]) > 12):
13
+ tf = tf.compat.v1
14
+ else:
15
+ tf = tf
16
+
17
+
18
+ def read_graph(file):
19
+ """
20
+ Loads the graph from a protobuf file
21
+
22
+ Parameters
23
+ -----------
24
+ file : string
25
+ path to the protobuf file
26
+
27
+ Returns
28
+ --------
29
+ graph_def :class:`tensorflow.compat.v1.GraphDef`
30
+ The graph definition of the DeepLabCut model found at the object's path
31
+ """
32
+
33
+ with tf.io.gfile.GFile(file, "rb") as f:
34
+ graph_def = tf.GraphDef()
35
+ graph_def.ParseFromString(f.read())
36
+ return graph_def
37
+
38
+
39
+ def finalize_graph(graph_def):
40
+ """
41
+ Finalize the graph for inference
42
+
43
+ Parameters
44
+ -----------
45
+ graph_def :class:`tensorflow.compat.v1.GraphDef`
46
+ The graph of the DeepLabCut model, read using the :func:`read_graph` method
47
+
48
+ Returns
49
+ --------
50
+ graph :class:`tensorflow.Graph`
+     The finalized graph of the DeepLabCut model
54
+ """
55
+
56
+ graph = tf.Graph()
57
+ with graph.as_default():
58
+ tf.import_graph_def(graph_def, name="DLC")
59
+ graph.finalize()
60
+
61
+ return graph
62
+
63
+
64
+ def get_output_nodes(graph):
65
+ """
66
+ Get the output node names from a graph
67
+
68
+ Parameters
69
+ -----------
70
+ graph :class:`tensorflow.Graph`
71
+ The graph of the DeepLabCut model
72
+
73
+ Returns
74
+ --------
75
+ output : list
76
+ the output node names as a list of strings
77
+ """
78
+
79
+ op_names = [str(op.name) for op in graph.get_operations()]
80
+ if "concat_1" in op_names[-1]:
81
+ output = [op_names[-1]]
82
+ else:
83
+ output = [op_names[-1], op_names[-2]]
84
+
85
+ return output
86
+
87
+
88
+ def get_output_tensors(graph):
89
+ """
90
+ Get the names of the output tensors from a graph
91
+
92
+ Parameters
93
+ -----------
94
+ graph :class:`tensorflow.Graph`
95
+ The graph of the DeepLabCut model
96
+
97
+ Returns
98
+ --------
99
+ output : list
100
+ the output tensor names as a list of strings
101
+ """
102
+
103
+ output_nodes = get_output_nodes(graph)
104
+ output_tensor = [out + ":0" for out in output_nodes]
105
+ return output_tensor
106
+
107
+
108
+ def get_input_tensor(graph):
109
+
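+     """ Return the name of the model's input tensor (the graph's first operation, with ":0" appended) """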
110
+ input_tensor = str(graph.get_operations()[0].name) + ":0"
111
+ return input_tensor
112
+
113
+
114
+ def extract_graph(graph, tf_config=None):
115
+ """
116
+ Initializes a tensorflow session with the specified graph and extracts the model's inputs and outputs
117
+
118
+ Parameters
119
+ -----------
120
+ graph :class:`tensorflow.Graph`
121
+ a tensorflow graph containing the desired model
122
+ tf_config :class:`tensorflow.ConfigProto`
123
+
124
+ Returns
125
+ --------
126
+ sess :class:`tensorflow.Session`
127
+ a tensorflow session with the specified graph definition
128
+ inputs :class:`tensorflow.Tensor`
+     the input tensor for the model
+ outputs : list of :class:`tensorflow.Tensor`
+     the output tensor(s) for the model
130
+ """
131
+
132
+ input_tensor = get_input_tensor(graph)
133
+ output_tensor = get_output_tensors(graph)
134
+ sess = tf.Session(graph=graph, config=tf_config)
135
+ inputs = graph.get_tensor_by_name(input_tensor)
136
+ outputs = [graph.get_tensor_by_name(out) for out in output_tensor]
137
+
138
+ return sess, inputs, outputs
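For reference, a short sketch of how these graph helpers chain together outside of `DLCLive` (the frozen-graph path and frame size are placeholders):

```
import numpy as np
from dlclive.graph import read_graph, finalize_graph, extract_graph

graph_def = read_graph("/path/to/exported_model/frozen_graph.pb")  # placeholder path
graph = finalize_graph(graph_def)
sess, inputs, outputs = extract_graph(graph)

# run one forward pass on a dummy RGB frame batch
frame = np.zeros((1, 480, 640, 3), dtype=np.float32)
pose_output = sess.run(outputs, feed_dict={inputs: frame})
```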
Repositories/DeepLabCut-live/dlclive/pose.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+
9
+ import numpy as np
10
+
11
+
12
+ def extract_cnn_output(outputs, cfg):
13
+ """
14
+ Extract location refinement and score map from DeepLabCut network
15
+
16
+ Parameters
17
+ -----------
18
+ outputs : list
19
+ List of outputs from DeepLabCut network.
20
+ Requires 2 entries:
21
+ index 0 is output from Sigmoid
22
+ index 1 is output from pose/locref_pred/block4/BiasAdd
23
+
24
+ cfg : dict
25
+ Dictionary read from the pose_cfg.yaml file for the network.
26
+
27
+ Returns
28
+ --------
29
+ scmap : :class:`numpy.ndarray`
+     score map as a (ny, nx, n_bodyparts) array
+
+ locref : :class:`numpy.ndarray` or None
+     location refinement offsets as a (ny, nx, n_bodyparts, 2) array, or None if location refinement is disabled in the config
34
+ """
35
+
36
+ scmap = outputs[0]
37
+ scmap = np.squeeze(scmap)
38
+ locref = None
39
+ if cfg["location_refinement"]:
40
+ locref = np.squeeze(outputs[1])
41
+ shape = locref.shape
42
+ locref = np.reshape(locref, (shape[0], shape[1], -1, 2))
43
+ locref *= cfg["locref_stdev"]
44
+ if len(scmap.shape) == 2: # for single body part!
45
+ scmap = np.expand_dims(scmap, axis=2)
46
+ return scmap, locref
47
+
48
+
49
+ def argmax_pose_predict(scmap, offmat, stride):
50
+ """
51
+ Combines score map and offsets to the final pose
52
+
53
+ Parameters
54
+ -----------
55
+ scmap : :class:`numpy.ndarray`
+     score map as a (ny, nx, n_bodyparts) array
+
+ offmat : :class:`numpy.ndarray`
+     location refinement offsets as a (ny, nx, n_bodyparts, 2) array
+
+ stride : int or float
+     stride of the score map relative to the input image (typically 8 for DeepLabCut networks)
63
+
64
+ Returns
65
+ --------
66
+ pose :class:`numpy.ndarray`
67
+ pose as a numpy array
68
+ """
69
+
70
+ num_joints = scmap.shape[2]
71
+ pose = []
72
+ for joint_idx in range(num_joints):
73
+ maxloc = np.unravel_index(
74
+ np.argmax(scmap[:, :, joint_idx]), scmap[:, :, joint_idx].shape
75
+ )
76
+ offset = np.array(offmat[maxloc][joint_idx])[::-1]
77
+ pos_f8 = np.array(maxloc).astype("float") * stride + 0.5 * stride + offset
78
+ pose.append(np.hstack((pos_f8[::-1], [scmap[maxloc][joint_idx]])))
79
+ return np.array(pose)
80
+
81
+
82
+ def get_top_values(scmap, n_top=5):
83
+ batchsize, ny, nx, num_joints = scmap.shape
84
+ scmap_flat = scmap.reshape(batchsize, nx * ny, num_joints)
85
+ if n_top == 1:
86
+ scmap_top = np.argmax(scmap_flat, axis=1)[None]
87
+ else:
88
+ scmap_top = np.argpartition(scmap_flat, -n_top, axis=1)[:, -n_top:]
89
+ for ix in range(batchsize):
90
+ vals = scmap_flat[ix, scmap_top[ix], np.arange(num_joints)]
91
+ arg = np.argsort(-vals, axis=0)
92
+ scmap_top[ix] = scmap_top[ix, arg, np.arange(num_joints)]
93
+ scmap_top = scmap_top.swapaxes(0, 1)
94
+
95
+ Y, X = np.unravel_index(scmap_top, (ny, nx))
96
+ return Y, X
97
+
98
+
99
+ def multi_pose_predict(scmap, locref, stride, num_outputs):
100
+ Y, X = get_top_values(scmap[None], num_outputs)
101
+ Y, X = Y[:, 0], X[:, 0]
102
+ num_joints = scmap.shape[2]
103
+ DZ = np.zeros((num_outputs, num_joints, 3))
104
+ for m in range(num_outputs):
105
+ for k in range(num_joints):
106
+ x = X[m, k]
107
+ y = Y[m, k]
108
+ DZ[m, k, :2] = locref[y, x, k, :]
109
+ DZ[m, k, 2] = scmap[y, x, k]
110
+
111
+ X = X.astype("float32") * stride + 0.5 * stride + DZ[:, :, 0]
112
+ Y = Y.astype("float32") * stride + 0.5 * stride + DZ[:, :, 1]
113
+ P = DZ[:, :, 2]
114
+
115
+ pose = np.empty((num_joints, num_outputs * 3), dtype="float32")
116
+ pose[:, 0::3] = X.T
117
+ pose[:, 1::3] = Y.T
118
+ pose[:, 2::3] = P.T
119
+
120
+ return pose
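As a toy illustration of the coordinate arithmetic in `argmax_pose_predict` above (synthetic arrays, not real network output):

```
import numpy as np
from dlclive.pose import argmax_pose_predict

# one body part, an 8x8 score map whose peak is at row 2, column 3
scmap = np.zeros((8, 8, 1))
scmap[2, 3, 0] = 0.9
offmat = np.zeros((8, 8, 1, 2))  # no sub-grid refinement

pose = argmax_pose_predict(scmap, offmat, stride=8)
# pose[0] == [3*8 + 4, 2*8 + 4, 0.9]  ->  x = 28, y = 20, likelihood = 0.9
```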
Repositories/DeepLabCut-live/dlclive/processor/README.md ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### DeepLabCut-live Processors
2
+
3
+ The `Processor` class allows users to implement processing or computation steps after DeepLabCut pose estimation. For example, a `Processor` can detect certain features of a pose and turn on an LED or an optogenetics laser, a `Processor` can implement a forward-prediction model that predicts an animal's pose ~10-100 ms into the future in order to apply feedback with effectively zero latency, or a `Processor` can do both.
4
+
5
+ The `Processor` is designed to be extremely flexible: it only needs to contain two methods: `Processor.process`, whose input and output is a pose as a numpy array, and `Processor.save`, which allows users to implement a method that saves any data the `Processor` acquires, such as the times a desired behavior occurred or the times an LED or laser was turned on/off. The save method must be written by the user, so users can choose whether this data is saved as a text/csv, numpy, pickle, or pandas file, to give a few examples.
6
+
7
+ To write your own custom `Processor`, your class must inherit the base `Processor` class (see [here](./processor.py)):
8
+ ```
9
+ from dlclive import Processor
10
+ class MyCustomProcessor(Processor):
11
+ ...
12
+ ```
13
+
14
+ To implement your processing steps, overwrite the `Processor.process` method:
15
+ ```
16
+ def process(self, pose, **kwargs):
17
+ # my processing steps go here
18
+ return pose
19
+ ```
20
+
21
+ For example `Processor` objects that communicate with Teensy microcontrollers to [control an optogenetics laser](../../example_processors/TeensyLaser), [turn on an LED upon detecting a mouse licking](../../example_processors/MouseLickLED), or [turn on an LED upon detecting a dog's rearing movement](../../example_processors/DogJumpLED), see our [example_processors](../../example_processors) directory.
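A minimal, self-contained sketch of such a custom `Processor` (the class name, body-part index, threshold, and file name are arbitrary illustrations, not part of the package):

```
import numpy as np
from dlclive import Processor

class CrossingLogger(Processor):
    """ Log the frame times at which the first body part crosses x = 100 px. """

    def __init__(self, x_thresh=100, lik_thresh=0.5, **kwargs):
        super().__init__(**kwargs)
        self.x_thresh = x_thresh
        self.lik_thresh = lik_thresh
        self.crossing_times = []

    def process(self, pose, **kwargs):
        # pose is an (n_bodyparts, 3) array of x, y, likelihood
        if pose[0, 2] > self.lik_thresh and pose[0, 0] > self.x_thresh:
            self.crossing_times.append(kwargs.get("frame_time"))
        return pose

    def save(self, file=""):
        np.save(file if file.endswith(".npy") else file + ".npy",
                np.array(self.crossing_times, dtype=float))
        return 0
```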
Repositories/DeepLabCut-live/dlclive/processor/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+ from dlclive.processor.processor import Processor
9
+ from dlclive.processor.kalmanfilter import KalmanFilterPredictor
Repositories/DeepLabCut-live/dlclive/processor/kalmanfilter.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+
9
+ import time
10
+ import numpy as np
11
+ from dlclive.processor import Processor
12
+
13
+
14
+ class KalmanFilterPredictor(Processor):
15
+ def __init__(
16
+ self,
17
+ adapt=True,
18
+ forward=0.002,
19
+ fps=30,
20
+ nderiv=2,
21
+ priors=[10, 10],
22
+ initial_var=5,
23
+ process_var=5,
24
+ dlc_var=20,
25
+ lik_thresh=0,
26
+ **kwargs,
27
+ ):
28
+
29
+ super().__init__(**kwargs)
30
+
31
+ self.adapt = adapt
32
+ self.forward = forward
33
+ self.dt = 1.0 / fps
34
+ self.nderiv = nderiv
35
+ self.priors = np.hstack(([1e5], priors))
36
+ self.initial_var = initial_var
37
+ self.process_var = process_var
38
+ self.dlc_var = dlc_var
39
+ self.lik_thresh = lik_thresh
40
+ self.is_initialized = False
41
+ self.last_pose_time = 0
42
+
43
+ def _get_forward_model(self, dt):
44
+
45
+ F = np.zeros((self.n_states, self.n_states))
46
+ for d in range(self.nderiv + 1):
47
+ for i in range(self.n_states - (d * self.bp * 2)):
48
+ F[i, i + (2 * self.bp * d)] = (dt ** d) / max(1, d)
49
+
50
+ return F
51
+
52
+ def _init_kf(self, pose):
53
+
54
+ # get number of body parts
55
+ self.bp = pose.shape[0]
56
+ self.n_states = self.bp * 2 * (self.nderiv + 1)
57
+
58
+ # initialize state matrix, set position to first pose
59
+ self.X = np.zeros((self.n_states, 1))
60
+ self.X[: (self.bp * 2)] = pose[:, :2].reshape(self.bp * 2, 1)
61
+
62
+ # initialize covariance matrix, measurement noise and process noise
63
+ self.P = np.eye(self.n_states) * self.initial_var
64
+ self.R = np.eye(self.n_states) * self.dlc_var
65
+ self.Q = np.eye(self.n_states) * self.process_var
66
+
67
+ self.H = np.eye(self.n_states)
68
+ self.K = np.zeros((self.n_states, self.n_states))
69
+ self.I = np.eye(self.n_states)
70
+
71
+ # initialize priors for forward prediction step only
72
+ B = np.repeat(self.priors, self.bp * 2)
73
+ self.B = B.reshape(B.size, 1)
74
+
75
+ self.is_initialized = True
76
+
77
+ def _predict(self):
78
+
79
+ F = self._get_forward_model(time.time() - self.last_pose_time)
80
+
81
+ Pd = np.diag(self.P).reshape(self.P.shape[0], 1)
82
+ X = (1 / ((1 / Pd) + (1 / self.B))) * (self.X / Pd)
83
+
84
+ self.Xp = np.dot(F, X)
85
+ self.Pp = np.dot(np.dot(F, self.P), F.T) + self.Q
86
+
87
+ def _get_residuals(self, pose):
88
+
89
+ z = np.zeros((self.n_states, 1))
90
+ z[: (self.bp * 2)] = pose[: self.bp, :2].reshape(self.bp * 2, 1)
91
+ for i in range(self.bp * 2, self.n_states):
92
+ z[i] = (z[i - (self.bp * 2)] - self.X[i - (self.bp * 2)]) / self.dt
93
+ self.y = z - np.dot(self.H, self.Xp)
94
+
95
+ def _update(self, liks):
96
+
97
+ S = np.dot(self.H, np.dot(self.Pp, self.H.T)) + self.R
98
+ K = np.dot(np.dot(self.Pp, self.H.T), np.linalg.inv(S))
99
+ self.X = self.Xp + np.dot(K, self.y)
100
+ self.X[liks < self.lik_thresh] = self.Xp[liks < self.lik_thresh]
101
+ self.P = np.dot(self.I - np.dot(K, self.H), self.Pp)
102
+
103
+ def _get_future_pose(self, dt):
104
+
105
+ Ff = self._get_forward_model(dt)
106
+ Xf = np.dot(Ff, self.X)
107
+ future_pose = Xf[: (self.bp * 2)].reshape(self.bp, 2)
108
+
109
+ return future_pose
110
+
111
+ def _get_state_likelihood(self, pose):
112
+
113
+ liks = pose[:, 2]
114
+ liks_xy = np.repeat(liks, 2)
115
+ liks_xy_deriv = np.tile(liks_xy, self.nderiv + 1)
116
+ liks_state = liks_xy_deriv.reshape(liks_xy_deriv.shape[0], 1)
117
+ return liks_state
118
+
119
+ def process(self, pose, **kwargs):
120
+
121
+ if not self.is_initialized:
122
+
123
+ self._init_kf(pose)
124
+ self.last_pose_time = time.time()
125
+ return pose
126
+
127
+ else:
128
+
129
+ self._predict()
130
+ self._get_residuals(pose)
131
+ liks = self._get_state_likelihood(pose)
132
+ self._update(liks)
133
+
134
+ forward_time = (
135
+ (time.time() - kwargs["frame_time"] + self.forward)
136
+ if self.adapt
137
+ else self.forward
138
+ )
139
+
140
+ future_pose = self._get_future_pose(forward_time)
141
+ future_pose = np.hstack((future_pose, pose[:, 2].reshape(self.bp, 1)))
142
+
143
+ self.last_pose_time = time.time()
144
+ return future_pose
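A sketch of how this forward-predicting processor can be attached to `DLCLive` (the model path and the forward-prediction time are placeholders):

```
import time
from dlclive import DLCLive
from dlclive.processor import KalmanFilterPredictor

# predict ~10 ms ahead of the current frame, adapting to the measured processing delay
kf = KalmanFilterPredictor(adapt=True, forward=0.010, fps=100, lik_thresh=0.5)
dlc_live = DLCLive("/path/to/exported_model", processor=kf)  # placeholder path

# after dlc_live.init_inference(first_frame), pass the frame capture time through get_pose
# so the predictor can adapt its forward-prediction window:
# pose = dlc_live.get_pose(frame, frame_time=time.time())
```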
Repositories/DeepLabCut-live/dlclive/processor/processor.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+ """
9
+ Default processor class. Processors must contain two methods:
10
+ i) process: takes in a pose, performs operations, and returns a pose
11
+ ii) save: saves any internal data generated by the processor (such as timestamps for commands to external hardware)
12
+ """
13
+
14
+
15
+ class Processor(object):
16
+ def __init__(self, **kwargs):
17
+ pass
18
+
19
+ def process(self, pose, **kwargs):
20
+ return pose
21
+
22
+ def save(self, file=""):
23
+ return 0
Repositories/DeepLabCut-live/dlclive/utils.py ADDED
@@ -0,0 +1,218 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+
9
+ import numpy as np
10
+ import warnings
11
+ from dlclive.exceptions import DLCLiveWarning
12
+
13
+ try:
14
+ import skimage
15
+
16
+ SK_IM = True
17
+ except Exception:
18
+ SK_IM = False
19
+
20
+ try:
21
+ import cv2
22
+
23
+ OPEN_CV = True
24
+ except Exception:
25
+ from PIL import Image
26
+
27
+ OPEN_CV = False
28
+ warnings.warn(
29
+ "OpenCV is not installed. Using pillow for image processing, which is slower.",
30
+ DLCLiveWarning,
31
+ )
32
+
33
+
34
+ def convert_to_ubyte(frame):
35
+ """ Converts an image to unsigned 8-bit integer numpy array.
36
+ If scikit-image is installed, uses skimage.img_as_ubyte, otherwise, uses a similar custom function.
37
+
38
+ Parameters
39
+ ----------
40
+ image : :class:`numpy.ndarray`
41
+ an image as a numpy array
42
+
43
+ Returns
44
+ -------
45
+ :class:`numpy.ndarray`
46
+ image converted to uint8
47
+ """
48
+
49
+ if SK_IM:
50
+ return skimage.img_as_ubyte(frame)
51
+ else:
52
+ return _img_as_ubyte_np(frame)
53
+
54
+
55
+ def resize_frame(frame, resize=None):
56
+ """ Resizes an image. Uses OpenCV if installed, otherwise, uses pillow
57
+
58
+ Parameters
59
+ ----------
60
+ image : :class:`numpy.ndarray`
61
+ an image as a numpy array
62
+ """
63
+
64
+ if (resize is not None) and (resize != 1):
65
+
66
+ if OPEN_CV:
67
+
68
+ new_x = int(frame.shape[0] * resize)
69
+ new_y = int(frame.shape[1] * resize)
70
+ return cv2.resize(frame, (new_y, new_x))
71
+
72
+ else:
73
+
74
+ new_x = int(frame.shape[0] * resize)
+ new_y = int(frame.shape[1] * resize)
+ img = Image.fromarray(frame)
+ img = img.resize((new_y, new_x))
76
+ return np.asarray(img)
77
+
78
+ else:
79
+
80
+ return frame
81
+
82
+
83
+ def img_to_rgb(frame):
84
+ """ Convert an image to RGB. Uses OpenCV if installed, otherwise uses pillow.
85
+
86
+ Parameters
87
+ ----------
88
+ frame : :class:`numpy.ndarray
89
+ an image as a numpy array
90
+ """
91
+
92
+ if frame.ndim == 2:
93
+
94
+ return gray_to_rgb(frame)
95
+
96
+ elif frame.ndim == 3:
97
+
98
+ return bgr_to_rgb(frame)
99
+
100
+ else:
101
+
102
+ warnings.warn(
103
+ f"Image has {frame.ndim} dimensions. Must be 2 or 3 dimensions to convert to RGB",
104
+ DLCLiveWarning,
105
+ )
106
+ return frame
107
+
108
+
109
+ def gray_to_rgb(frame):
110
+ """ Convert an image from grayscale to RGB. Uses OpenCV if installed, otherwise uses pillow.
111
+
112
+ Parameters
113
+ ----------
114
+ frame : :class:`numpy.ndarray
115
+ an image as a numpy array
116
+ """
117
+
118
+ if OPEN_CV:
119
+
120
+ return cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
121
+
122
+ else:
123
+
124
+ img = Image.fromarray(frame)
125
+ img = img.convert("RGB")
126
+ return np.asarray(img)
127
+
128
+
129
+ def bgr_to_rgb(frame):
130
+ """ Convert an image from BGR to RGB. Uses OpenCV if installed, otherwise uses pillow.
131
+
132
+ Parameters
133
+ ----------
134
+ frame : :class:`numpy.ndarray
135
+ an image as a numpy array
136
+ """
137
+
138
+ if OPEN_CV:
139
+
140
+ return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
141
+
142
+ else:
143
+
144
+ img = Image.fromarray(frame)
145
+ img = img.convert("RGB")
146
+ return np.asarray(img)
147
+
148
+
149
+ def _img_as_ubyte_np(frame):
150
+ """ Converts an image as a numpy array to an unsigned 8-bit integer array.
151
+ As in scikit-image img_as_ubyte, converts negative pixels to 0 and converts range to [0, 255]
152
+
153
+ Parameters
154
+ ----------
155
+ image : :class:`numpy.ndarray`
156
+ an image as a numpy array
157
+
158
+ Returns
159
+ -------
160
+ :class:`numpy.ndarray`
161
+ image converted to uint8
162
+ """
163
+
164
+ frame = np.array(frame)
165
+ im_type = frame.dtype.type
166
+
167
+ # check if already ubyte
168
+ if np.issubdtype(im_type, np.uint8):
169
+
170
+ return frame
171
+
172
+ # if floating
173
+ elif np.issubdtype(im_type, np.floating):
174
+
175
+ if (np.min(frame) < -1) or (np.max(frame) > 1):
176
+ raise ValueError("Images of type float must be between -1 and 1.")
177
+
178
+ frame *= 255
179
+ frame = np.rint(frame)
180
+ frame = np.clip(frame, 0, 255)
181
+ return frame.astype(np.uint8)
182
+
183
+ # if integer
184
+ elif np.issubdtype(im_type, np.integer):
185
+
186
+ im_type_info = np.iinfo(im_type)
187
+ frame = frame * (255 / im_type_info.max)  # avoid in-place float multiplication on an integer array
188
+ frame[frame < 0] = 0
189
+ return frame.astype(np.uint8)
190
+
191
+ else:
192
+
193
+ raise TypeError(
194
+ "image of type {} could not be converted to ubyte".format(im_type)
195
+ )
196
+
197
+
198
+ def decode_fourcc(cc):
199
+ """
200
+ Convert float fourcc code from opencv to characters.
201
+ If decode fails, returns empty string.
202
+ https://stackoverflow.com/a/49138893
203
+ Arguments:
204
+ cc (float, int): fourcc code from opencv
205
+ Returns:
206
+ str: Character format of fourcc code
207
+
208
+ Examples:
209
+ >>> vid = cv2.VideoCapture('/some/video/path.avi')
210
+ >>> decode_fourcc(vid.get(cv2.CAP_PROP_FOURCC))
211
+ 'DIVX'
212
+ """
213
+ try:
214
+ decoded = "".join([chr((int(cc) >> 8 * i) & 0xFF) for i in range(4)])
215
+ except:
216
+ decoded = ""
217
+
218
+ return decoded
Repositories/DeepLabCut-live/dlclive/version.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Live Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+ admin@deeplabcut.org
5
+
6
+ Licensed under GNU Lesser General Public License v3.0
7
+ """
8
+
9
+
10
+ __version__ = "1.0.3"
11
+ VERSION = __version__
Repositories/DeepLabCut-live/docs/install_desktop.md ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### Install DeepLabCut-live on a desktop (Windows/Ubuntu)
2
+
3
+ We recommend that you install DeepLabCut-live in a conda environment (it is a standard Python package, though, so other distributions will also likely work). In this case, please install Anaconda:
4
+
5
+ - [Windows](https://docs.anaconda.com/anaconda/install/windows/)
6
+ - [Linux](https://docs.anaconda.com/anaconda/install/linux/)
7
+
8
+ Create a conda environment with python 3.7 and tensorflow:
9
+
10
+ ```
11
+ conda create -n dlc-live python=3.7 tensorflow-gpu==1.13.1 # if using GPU
12
+ conda create -n dlc-live python=3.7 tensorflow==1.13.1 # if not using GPU
13
+ ```
14
+
15
+ Activate the conda environment, install the DeepLabCut-live package, then test the installation:
16
+
17
+ ```
18
+ conda activate dlc-live
19
+ pip install deeplabcut-live
20
+ dlc-live-test
21
+ ```
22
+
23
+ Note, you can also just run the test:
24
+
25
+ `dlc-live-test`
26
+
27
+ If installed properly, this script will i) create a temporary folder, ii) download the full_dog model from the [DeepLabCut Model Zoo](http://www.mousemotorlab.org/dlc-modelzoo), iii) download a short video clip of a dog, iv) run inference while displaying keypoints, and v) remove the temporary folder.
28
+
29
+ Please note that curl should also be installed on your computer (typically it already is); if it is not, run `sudo apt install curl`
Repositories/DeepLabCut-live/docs/install_jetson.md ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### Install DeepLabCut-live on a NVIDIA Jetson Development Kit
2
+
3
+ First, please follow NVIDIA's specific instructions to setup your Jetson Development Kit (see [Jetson Development Kit User Guides](https://developer.nvidia.com/embedded/learn/getting-started-jetson)). Once you have installed the NVIDIA Jetpack on your Jetson Development Kit, make sure all system libraries are up-to-date. In a terminal, run:
4
+
5
+ ```
6
+ sudo apt-get update
7
+ sudo apt-get upgrade
8
+ ```
9
+
10
+ Lastly, please test that CUDA is installed properly by running: `nvcc --version`. The output should say the version of CUDA installed on your Jetson.
11
+
12
+ #### Install python, virtualenv, and tensorflow
13
+
14
+ We highly recommend installing DeepLabCut-live in a virtual environment. Please run the following command to install system dependencies needed to run python, to create virtual environments, and to run tensorflow:
15
+
16
+ ```
17
+ sudo apt-get update
18
+ sudo apt-get install libhdf5-serial-dev \
19
+ hdf5-tools \
20
+ libhdf5-dev \
21
+ zlib1g-dev \
22
+ zip \
23
+ libjpeg8-dev \
24
+ liblapack-dev \
25
+ libblas-dev \
26
+ gfortran \
27
+ python3-pip \
28
+ python3-venv \
29
+ python3-tk \
30
+ curl
31
+ ```
32
+
33
+ #### Create a virtual environment
34
+
35
+ Next, create a virtual environment called `dlc-live`, activate the `dlc-live` environment, and update its package manager:
36
+
37
+ ```
38
+ python3 -m venv dlc-live
39
+ source dlc-live/bin/activate
40
+ pip install -U pip testresources setuptools
41
+ ```
42
+
43
+ #### Install DeepLabCut-live dependencies
44
+
45
+ First, install python dependencies to run tensorflow (from [NVIDIA instructions to install tensorflow on Jetson platforms](https://docs.nvidia.com/deeplearning/frameworks/install-tf-jetson-platform/index.html)). _This may take ~15-30 minutes._
46
+
47
+ ```
48
+ pip3 install numpy==1.16.1 \
49
+ future==0.17.1 \
50
+ mock==3.0.5 \
51
+ h5py==2.9.0 \
52
+ keras_preprocessing==1.0.5 \
53
+ keras_applications==1.0.8 \
54
+ gast==0.2.2 \
55
+ futures \
56
+ protobuf \
57
+ pybind11
58
+ ```
59
+
60
+ Next, install tensorflow 1.x. This command will depend on the version of Jetpack you are using. If you are uncertain, please refer to [NVIDIA's instructions](https://docs.nvidia.com/deeplearning/frameworks/install-tf-jetson-platform/index.html#install). To install tensorflow 1.x on the latest version of NVIDIA Jetpack (version 4.4 as of 8/2/2020), please use the command below. _This step will also take 15-30 mins_.
61
+
62
+ ```
63
+ pip3 install --pre --extra-index-url https://developer.download.nvidia.com/compute/redist/jp/v44 'tensorflow<2'
64
+ ```
65
+
66
+ Lastly, copy the opencv-python bindings into your virtual environment:
67
+
68
+ ```
69
+ cp -r /usr/lib/python3.6/dist-packages ~/dlc-live/lib/python3.6/dist-packages
70
+ ```
71
+
72
+ #### Install the DeepLabCut-live package
73
+
74
+ Finally, please install DeepLabCut-live from PyPi (_this will take 3-5 mins_), then test the installation:
75
+
76
+ ```
77
+ pip install deeplabcut-live
78
+ dlc-live-test
79
+ ```
80
+
81
+ If installed properly, this script will i) download the full_dog model from the DeepLabCut Model Zoo, ii) download a short video clip of a dog, and iii) run inference while displaying keypoints.
Repositories/DeepLabCut-live/example_processors/DogJumpLED/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+ from .izzy_jump import IzzyJump, IzzyJumpKF
9
+ from .izzy_jump_offline import IzzyJumpOffline, IzzyJumpKFOffline
Repositories/DeepLabCut-live/example_processors/DogJumpLED/izzy_jump.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+
9
+ import serial
10
+ import struct
11
+ import time
12
+ import numpy as np
13
+
14
+ from dlclive.processor import Processor, KalmanFilterPredictor
15
+
16
+
17
+ class IzzyJump(Processor):
18
+ def __init__(self, com="", lik_thresh=0.5, baudrate=int(9600), **kwargs):
19
+
20
+ super().__init__()
21
+ self.ser = serial.Serial(com, baudrate, timeout=0)
22
+ self.lik_thresh = lik_thresh
23
+ self.led_times = []
24
+ self.last_light = 0
25
+
26
+ def close_serial(self):
27
+
28
+ self.ser.close()
29
+
30
+ def switch_led(self, val, frame_time):
31
+
32
+ ### check status of led ###
33
+
34
+ self.ser.write(b"R")
35
+
36
+ led_byte = b""
37
+ led_status = None
38
+ while (len(led_byte) != 0) or (led_status is None):
39
+ led_byte = self.ser.read()
40
+ if len(led_byte) > 0:
41
+ led_status = ord(led_byte)
42
+
43
+ if led_status != val:
44
+ ctime = time.time()
45
+ if ctime - self.last_light > 0.25:
46
+ self.ser.write(b"L")
47
+ self.last_light = ctime
48
+ self.led_times.append((val, frame_time, ctime))
49
+
50
+ def process(self, pose, **kwargs):
51
+
52
+ ### bodyparts
53
+ # 0. nose
54
+ # 1. L-eye
55
+ # 2. R-eye
56
+ # 3. L-ear
57
+ # 4. R-ear
58
+ # 5. Throat
59
+ # 6. Withers
60
+ # 7. Tailset
61
+ # 8. L-front-paw
62
+ # 9. R-front-paw
63
+ # 10. L-front-wrist
64
+ # 11. R-front-wrist
65
+ # 12. L-front-elbow
66
+ # 13. R-front-elbow
67
+ # ...
68
+
69
+ l_elbow = pose[12, 1] if pose[12, 2] > self.lik_thresh else None
70
+ r_elbow = pose[13, 1] if pose[13, 2] > self.lik_thresh else None
71
+ elbows = [l_elbow, r_elbow]
72
+ this_elbow = (
73
+ min([e for e in elbows if e is not None])
74
+ if any([e is not None for e in elbows])
75
+ else None
76
+ )
77
+
78
+ withers = pose[6, 1] if pose[6, 2] > self.lik_thresh else None
79
+
80
+ if kwargs["record"]:
81
+ if withers is not None and this_elbow is not None:
82
+ if this_elbow < withers:
83
+ self.switch_led(True, kwargs["frame_time"])
84
+ else:
85
+ self.switch_led(False, kwargs["frame_time"])
86
+
87
+ return pose
88
+
89
+ def save(self, filename):
90
+
91
+ ### save stim on and stim off times
92
+
93
+ if filename[-4:] != ".npy":
94
+ filename += ".npy"
95
+ arr = np.array(self.led_times, dtype=float)
96
+ try:
97
+ np.save(filename, arr)
98
+ save_code = True
99
+ except Exception:
100
+ save_code = False
101
+
102
+ return save_code
103
+
104
+
105
+ class IzzyJumpKF(KalmanFilterPredictor, IzzyJump):
106
+ def __init__(
107
+ self,
108
+ com="",
109
+ lik_thresh=0.5,
110
+ baudrate=int(9600),
111
+ adapt=True,
112
+ forward=0.003,
113
+ fps=30,
114
+ nderiv=2,
115
+ priors=[1, 1],
116
+ initial_var=1,
117
+ process_var=1,
118
+ dlc_var=4,
119
+ ):
120
+
121
+ super().__init__(
122
+ adapt=adapt,
123
+ forward=forward,
124
+ fps=fps,
125
+ nderiv=nderiv,
126
+ priors=priors,
127
+ initial_var=initial_var,
128
+ process_var=process_var,
129
+ dlc_var=dlc_var,
130
+ com=com,
131
+ lik_thresh=lik_thresh,
132
+ baudrate=baudrate,
133
+ )
134
+
135
+ def process(self, pose, **kwargs):
136
+
137
+ future_pose = KalmanFilterPredictor.process(self, pose, **kwargs)
138
+ final_pose = IzzyJump.process(self, future_pose, **kwargs)
139
+ return final_pose
140
+
141
+ def save(self, filename):
142
+
143
+ return IzzyJump.save(self, filename)
Repositories/DeepLabCut-live/example_processors/DogJumpLED/izzy_jump_offline.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+
9
+ import struct
10
+ import time
11
+ import numpy as np
12
+
13
+ from dlclive.processor import Processor, KalmanFilterPredictor
14
+
15
+
16
+ class IzzyJumpOffline(Processor):
17
+ def __init__(self, lik_thresh=0.5, **kwargs):
18
+
19
+ super().__init__()
20
+ self.lik_thresh = lik_thresh
21
+ self.led_times = []
22
+ self.last_light = 0
23
+ self.led_status = False
24
+
25
+ def switch_led(self, val, frame_time):
26
+
27
+ if self.led_status != val:
28
+ ctime = frame_time
29
+ if ctime - self.last_light > 0.25:
30
+ self.led_status = val
31
+ self.last_light = ctime
32
+ self.led_times.append((val, frame_time, ctime))
33
+
34
+ def process(self, pose, **kwargs):
35
+
36
+ ### bodyparts
37
+ # 0. nose
38
+ # 1. L-eye
39
+ # 2. R-eye
40
+ # 3. L-ear
41
+ # 4. R-ear
42
+ # 5. Throat
43
+ # 6. Withers
44
+ # 7. Tailset
45
+ # 8. L-front-paw
46
+ # 9. R-front-paw
47
+ # 10. L-front-wrist
48
+ # 11. R-front-wrist
49
+ # 12. L-front-elbow
50
+ # 13. R-front-elbow
51
+ # ...
52
+
53
+ l_elbow = pose[12, 1] if pose[12, 2] > self.lik_thresh else None
54
+ r_elbow = pose[13, 1] if pose[13, 2] > self.lik_thresh else None
55
+ elbows = [l_elbow, r_elbow]
56
+ this_elbow = (
57
+ min([e for e in elbows if e is not None])
58
+ if any([e is not None for e in elbows])
59
+ else None
60
+ )
61
+
62
+ withers = pose[6, 1] if pose[6, 2] > self.lik_thresh else None
63
+
64
+ if kwargs["record"]:
65
+ if withers is not None and this_elbow is not None:
66
+ if this_elbow < withers:
67
+ self.switch_led(True, kwargs["frame_time"])
68
+ else:
69
+ self.switch_led(False, kwargs["frame_time"])
70
+
71
+ return pose
72
+
73
+ def save(self, filename):
74
+
75
+ ### save stim on and stim off times
76
+
77
+ if filename[-4:] != ".npy":
78
+ filename += ".npy"
79
+ arr = np.array(self.led_times, dtype=float)
80
+ try:
81
+ np.save(filename, arr)
82
+ save_code = True
83
+ except Exception:
84
+ save_code = False
85
+
86
+ return save_code
87
+
88
+
89
+ class IzzyJumpKFOffline(KalmanFilterPredictor, IzzyJumpOffline):
90
+ def __init__(
91
+ self,
92
+ lik_thresh=0.5,
93
+ adapt=True,
94
+ forward=0.003,
95
+ fps=30,
96
+ nderiv=2,
97
+ priors=[1, 1],
98
+ initial_var=1,
99
+ process_var=1,
100
+ dlc_var=4,
101
+ ):
102
+
103
+ super().__init__(
104
+ adapt=adapt,
105
+ forward=forward,
106
+ fps=fps,
107
+ nderiv=nderiv,
108
+ priors=priors,
109
+ initial_var=initial_var,
110
+ process_var=process_var,
111
+ dlc_var=dlc_var,
112
+ lik_thresh=lik_thresh,
113
+ )
114
+
115
+ def process(self, pose, **kwargs):
116
+
117
+ future_pose = KalmanFilterPredictor.process(self, pose, **kwargs)
118
+ final_pose = IzzyJumpOffline.process(self, future_pose, **kwargs)
119
+ return final_pose
120
+
121
+ def save(self, filename):
122
+
123
+ return IzzyJumpOffline.save(self, filename)
Repositories/DeepLabCut-live/example_processors/DogJumpLED/teensy_leds/teensy_leds.ino ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ const int LED = 0;
2
+ const int IR = 1;
3
+ const int REC = 2;
4
+
5
+ void blink() {
6
+
7
+ Serial.write(!digitalRead(REC));
8
+ Serial.flush();
9
+ noTone(IR);
10
+ while (digitalRead(REC) == 0) {}
11
+
12
+ }
13
+
14
+ void setup() {
15
+
16
+ pinMode(LED, OUTPUT);
17
+ pinMode(IR, OUTPUT);
18
+ pinMode(REC, INPUT);
19
+ attachInterrupt(digitalPinToInterrupt(REC), blink, FALLING);
20
+
21
+ Serial.begin(9600);
22
+ }
23
+
24
+ void loop() {
25
+
26
+ unsigned int ser_avail = Serial.available();
27
+
28
+ while (ser_avail > 0) {
29
+
30
+ unsigned int cmd = Serial.read();
31
+
32
+ if (cmd == 'L') {
33
+
34
+ digitalWrite(LED, !digitalRead(LED));
35
+
36
+ } else if (cmd == 'R') {
37
+
38
+ Serial.write(digitalRead(LED));
39
+ Serial.flush();
40
+
41
+ } else if (cmd == 'I') {
42
+
43
+ tone(IR, 38000);
44
+
45
+ }
46
+
47
+ }
48
+
49
+ }
Repositories/DeepLabCut-live/example_processors/MouseLickLED/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+ from .lick_led import MouseLickLED
Repositories/DeepLabCut-live/example_processors/MouseLickLED/lick_led.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+
9
+ import serial
10
+ import struct
11
+ import time
12
+ import numpy as np
13
+
14
+ from dlclive import Processor
15
+
16
+
17
+ class MouseLickLED(Processor):
18
+ def __init__(self, com, lik_thresh=0.5, baudrate=int(9600)):
19
+
20
+ super().__init__()
21
+ self.ser = serial.Serial(com, baudrate, timeout=0)
22
+ self.lik_thresh = lik_thresh
23
+ self.lick_frame_time = []
24
+ self.out_time = []
25
+ self.in_time = []
26
+
27
+ def close_serial(self):
28
+
29
+ self.ser.close()
30
+
31
+ def switch_led(self):
32
+
33
+ ### flush input buffer ###
34
+
35
+ self.ser.reset_input_buffer()
36
+
37
+ ### turn on IR LED ###
38
+
39
+ self.out_time.append(time.time())
40
+ self.ser.write(b"I")
41
+
42
+ ### wait for receiver ###
43
+
44
+ while True:
45
+ led_byte = self.ser.read()
46
+ if len(led_byte) > 0:
47
+ break
48
+ self.in_time.append(time.time())
49
+
50
+ def process(self, pose, **kwargs):
51
+
52
+ ### bodyparts
53
+ # 0. pupil-top
54
+ # 1. pupil-left
55
+ # 2. pupil-bottom
56
+ # 3. pupil-right
57
+ # 4. lip-upper
58
+ # 5. lip-lower
59
+ # 6. tongue
60
+ # 7. tube
61
+
62
+ if kwargs["record"]:
63
+ if pose[6, 2] > self.lik_thresh:
64
+ self.lick_frame_time.append(kwargs["frame_time"])
65
+ self.switch_led()
66
+
67
+ return pose
68
+
69
+ def save(self, filename):
70
+
71
+ ### save stim on and stim off times
72
+
73
+ filename += ".npy"
74
+ out_time = np.array(self.out_time)
75
+ in_time = np.array(self.in_time)
76
+ frame_time = np.array(self.lick_frame_time)
77
+ try:
78
+ np.savez(
79
+ filename, out_time=out_time, in_time=in_time, frame_time=frame_time
80
+ )
81
+ save_code = True
82
+ except Exception:
83
+ save_code = False
84
+
85
+ return save_code
Repositories/DeepLabCut-live/example_processors/MouseLickLED/teensy_leds/teensy_leds.ino ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ const int LED = 0;
2
+ const int IR = 1;
3
+ const int REC = 2;
4
+
5
+ void blink() {
6
+
7
+ Serial.write(!digitalRead(REC));
8
+ Serial.flush();
9
+ noTone(IR);
10
+ while (digitalRead(REC) == 0) {}
11
+
12
+ }
13
+
14
+ void setup() {
15
+
16
+ pinMode(LED, OUTPUT);
17
+ pinMode(IR, OUTPUT);
18
+ pinMode(REC, INPUT);
19
+ attachInterrupt(digitalPinToInterrupt(REC), blink, FALLING);
20
+
21
+ Serial.begin(9600);
22
+ }
23
+
24
+ void loop() {
25
+
26
+ unsigned int ser_avail = Serial.available();
27
+
28
+ while (ser_avail > 0) {
29
+
30
+ unsigned int cmd = Serial.read();
31
+
32
+ if (cmd == 'L') {
33
+
34
+ digitalWrite(LED, !digitalRead(LED));
35
+
36
+ } else if (cmd == 'R') {
37
+
38
+ Serial.write(digitalRead(LED));
39
+ Serial.flush();
40
+
41
+ } else if (cmd == 'I') {
42
+
43
+ tone(IR, 38000);
44
+
45
+ }
46
+
47
+ }
48
+
49
+ }
Repositories/DeepLabCut-live/example_processors/TeensyLaser/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+ from .teensy_laser import *
Repositories/DeepLabCut-live/example_processors/TeensyLaser/teensy_laser.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepLabCut Toolbox (deeplabcut.org)
3
+ © A. & M. Mathis Labs
4
+
5
+ Licensed under GNU Lesser General Public License v3.0
6
+ """
7
+
8
+
9
+ from dlclive.processor.processor import Processor
10
+ import serial
11
+ import struct
12
+ import pickle
13
+ import time
14
+
15
+
16
+ class TeensyLaser(Processor):
17
+ def __init__(
18
+ self, com, baudrate=115200, pulse_freq=50, pulse_width=5, max_stim_dur=0
19
+ ):
20
+
21
+ super().__init__()
22
+ self.ser = serial.Serial(com, baudrate)
23
+ self.pulse_freq = pulse_freq
24
+ self.pulse_width = pulse_width
25
+ self.max_stim_dur = (
26
+ max_stim_dur if (max_stim_dur >= 0) and (max_stim_dur < 65356) else 0
27
+ )
28
+ self.stim_on = False
29
+ self.stim_on_time = []
30
+ self.stim_off_time = []
31
+
32
+ def close_serial(self):
33
+
34
+ self.ser.close()
35
+
36
+ def turn_stim_on(self):
37
+
38
+ # command to activate PWM signal to laser is the letter 'O' followed by three 16 bit integers -- pulse frequency, pulse width, and max stim duration
39
+ if not self.stim_on:
40
+ self.ser.write(
41
+ b"O"
42
+ + struct.pack(
43
+ "HHH", self.pulse_freq, self.pulse_width, self.max_stim_dur
44
+ )
45
+ )
46
+ self.stim_on = True
47
+ self.stim_on_time.append(time.time())
48
+
49
+ def turn_stim_off(self):
50
+
51
+ # command to turn off PWM signal to laser is the letter 'X'
52
+ if self.stim_on:
53
+ self.ser.write(b"X")
54
+ self.stim_on = False
55
+ self.stim_off_time.append(time.time())
56
+
57
+ def process(self, pose, **kwargs):
58
+
59
+ # define criteria to stimulate (e.g. if first point is in a corner of the video)
60
+ box = [[0, 100], [0, 100]]
61
+ if (
62
+ (pose[0][0] > box[0][0])
63
+ and (pose[0][0] < box[0][1])
64
+ and (pose[0][1] > box[1][0])
65
+ and (pose[0][1] < box[1][1])
66
+ ):
67
+ self.turn_stim_on()
68
+ else:
69
+ self.turn_stim_off()
70
+
71
+ return pose
72
+
73
+ def save(self, file=None):
74
+
75
+ ### save stim on and stim off times
76
+ save_code = 0
77
+ if file:
78
+ try:
79
+ pickle.dump(
80
+ {"stim_on": self.stim_on_time, "stim_off": self.stim_off_time},
81
+ open(file, "wb"),
82
+ )
83
+ save_code = 1
84
+ except Exception:
85
+ save_code = -1
86
+ return save_code
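For clarity, the serial framing that `turn_stim_on` sends (example values; the byte output shown assumes a little-endian host, since `struct.pack` uses native byte order here):

```
import struct

# 'O' followed by three unsigned 16-bit integers:
# pulse frequency, pulse width, and max stimulation duration
cmd = b"O" + struct.pack("HHH", 50, 5, 0)
print(cmd)  # b'O2\x00\x05\x00\x00\x00'
```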
Repositories/DeepLabCut-live/example_processors/TeensyLaser/teensy_laser/teensy_laser.ino ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Commands:
3
+ * O = opto on; command = O, frequency, width, duration
4
+ * X = opto off
5
+ * R = reboot
6
+ */
7
+
8
+
9
+ const int opto_pin = 0;
10
+ unsigned int opto_start = 0,
11
+ opto_duty_cycle = 0,
12
+ opto_freq = 0,
13
+ opto_width = 0,
14
+ opto_dur = 0;
15
+
16
+ unsigned int read_int16() {
17
+ union u_tag {
18
+ byte b[2];
19
+ unsigned int val;
20
+ } par;
21
+ for (int i=0; i<2; i++){
22
+ if ((Serial.available() > 0))
23
+ par.b[i] = Serial.read();
24
+ else
25
+ par.b[i] = 0;
26
+ }
27
+ return par.val;
28
+ }
29
+
30
+ void setup() {
31
+ Serial.begin(115200);
32
+ pinMode(opto_pin, OUTPUT);
33
+ }
34
+
35
+ void loop() {
36
+
37
+ unsigned int curr_time = millis();
38
+
39
+ while (Serial.available() > 0) {
40
+
41
+ unsigned int cmd = Serial.read();
42
+
43
+ if(cmd == 'O') {
44
+
45
+ opto_start = curr_time;
46
+ opto_freq = read_int16();
47
+ opto_width = read_int16();
48
+ opto_dur = read_int16();
49
+ if (opto_dur == 0)
50
+ opto_dur = 65355;
51
+ opto_duty_cycle = opto_width * opto_freq * 4096 / 1000;
52
+ analogWriteFrequency(opto_pin, opto_freq);
53
+ analogWrite(opto_pin, opto_duty_cycle);
54
+
55
+ Serial.print(opto_freq);
56
+ Serial.print(',');
57
+ Serial.print(opto_width);
58
+ Serial.print(',');
59
+ Serial.print(opto_dur);
60
+ Serial.print('\n');
61
+ Serial.flush();
62
+
63
+ } else if(cmd == 'X') {
64
+
65
+ analogWrite(opto_pin, 0);
66
+
67
+ } else if(cmd == 'R') {
68
+
69
+ _reboot_Teensyduino_();
70
+
71
+ }
72
+ }
73
+
74
+ if (curr_time > opto_start + opto_dur)
75
+ analogWrite(opto_pin, 0);
76
+
77
+ }
Repositories/DeepLabCut-live/poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
Repositories/DeepLabCut-live/pyproject.toml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [tool.poetry]
2
+ name = "deeplabcut-live"
3
+ version = "1.0.3"
4
+ description = "Class to load exported DeepLabCut networks and perform pose estimation on single frames (from a camera feed)"
5
+ authors = ["A. & M. Mathis Labs <admin@deeplabcut.org>"]
6
+ license = "AGPL-3.0-or-later"
7
+ readme = "README.md"
8
+ homepage = "https://github.com/DeepLabCut/DeepLabCut-live"
9
+ repository = "https://github.com/DeepLabCut/DeepLabCut-live"
10
+ classifiers = [
11
+ "Programming Language :: Python :: 3",
12
+ "Programming Language :: Python :: 3.7",
13
+ "Programming Language :: Python :: 3.8",
14
+ "Programming Language :: Python :: 3.9",
15
+ "Programming Language :: Python :: 3.10",
16
+ "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
17
+ "Operating System :: OS Independent"
18
+ ]
19
+ packages = [
20
+ { include = "dlclive" }
21
+ ]
22
+ include = ["dlclive/check_install/*"]
23
+
24
+ [tool.poetry.scripts]
25
+ dlc-live-test = "dlclive.check_install.check_install:main"
26
+ dlc-live-benchmark = "dlclive.benchmark:main"
27
+
28
+ [tool.poetry.dependencies]
29
+ python = ">=3.7.1,<3.11"
30
+ numpy = "^1.20"
31
+ "ruamel.yaml" = "^0.17.20"
32
+ colorcet = "^3.0.0"
33
+ Pillow = ">=8.0.0"
34
+ py-cpuinfo = ">=5.0.0"
35
+ tqdm = "^4.62.3"
36
+ tensorflow = "^2.7.0,<=2.10"
37
+ pandas = "^1.3"
38
+ tables = "^3.6"
39
+ opencv-python-headless = "^4.5"
40
+ dlclibrary = ">=0.0.2"
41
+
42
+ [tool.poetry.dev-dependencies]
43
+
44
+ [build-system]
45
+ requires = ["poetry-core>=1.0.0"]
46
+ build-backend = "poetry.core.masonry.api"
Repositories/DeepLabCut-live/reinstall.sh ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ poetry shell # activating current environment
2
+ poetry install # creating and installing current project
3
+ poetry build # creating the tarball
4
+ poetry publish # uploading to pypi
app.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import subprocess
3
+ # subprocess.check_call([sys.executable, '-m', 'pip', 'install','git+https://github.com/facebookresearch/detectron2.git'])
4
+
5
+ import numpy as np
6
+ import gradio as gr
7
+ from Code import Inference
8
+ import detectron2
9
+
10
+
11
+ # import some common detectron2 utilities
12
+ from detectron2 import model_zoo
13
+ from detectron2.engine import DefaultPredictor
14
+ from detectron2.config import get_cfg
15
+ from detectron2.utils.visualizer import Visualizer
16
+ from detectron2.data import MetadataCatalog, DatasetCatalog
17
+
18
+ import os
19
+ os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
20
+
21
+
22
+ sys.path.append("Repositories/")
23
+ from dlclive import DLCLive, Processor
24
+
25
+ def run_Inference(input_img):
26
+
27
+ ###Detectron:
28
+ cfg = get_cfg()
29
+ # add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
30
+ cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml"))
31
+ cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set threshold for this model
32
+ # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
33
+ cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml")
34
+ cfg.MODEL.DEVICE='cpu'
35
+ predictor = DefaultPredictor(cfg)
36
+
37
+ ##DLC:
38
+ dlc_proc = Processor()
39
+ dlc_liveObj = DLCLive("./Weights/DLC_DLC_Segmented_resnet_50_iteration-0_shuffle-1/", processor=dlc_proc)
40
+
41
+ OutImg = Inference.Inference(input_img,predictor,dlc_liveObj,ScaleBBox=1,Dilate=5,DLCThreshold=0.3)
42
+
43
+ return OutImg
44
+
45
+ demo = gr.Interface(run_Inference, gr.Image(), "image")
46
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ deeplabcut
2
+ tensorflow
3
+ opencv-python
4
+ deeplabcut-live
5
+ typing-extensions==4.8.0
6
+ colorcet
7
+ torch
8
+ torchvision