rnwang committed
Commit 650d33e
1 Parent(s): 7063d73

add inference demo

Files changed (50)
  1. .gitattributes +1 -0
  2. README.md +5 -5
  3. app.py +199 -0
  4. data.py +233 -0
  5. data/COCO_image.jpg +0 -0
  6. data/openvino_api.mp4 +3 -0
  7. data/quantize_ort_api.mp4 +3 -0
  8. data/training_api.mp4 +3 -0
  9. data/webcam/input/00000.png +0 -0
  10. data/webcam/input/00001.png +0 -0
  11. data/webcam/input/00002.png +0 -0
  12. data/webcam/input/00003.png +0 -0
  13. data/webcam/input/00004.png +0 -0
  14. data/webcam/input/00005.png +0 -0
  15. data/webcam/input/00006.png +0 -0
  16. data/webcam/input/00007.png +0 -0
  17. data/webcam/input/00008.png +0 -0
  18. data/webcam/input/00009.png +0 -0
  19. data/webcam/input/00010.png +0 -0
  20. data/webcam/input/00011.png +0 -0
  21. data/webcam/input/00012.png +0 -0
  22. data/webcam/input/00013.png +0 -0
  23. data/webcam/input/00014.png +0 -0
  24. data/webcam/input/00015.png +0 -0
  25. data/webcam/input/00016.png +0 -0
  26. data/webcam/input/00017.png +0 -0
  27. data/webcam/input/00018.png +0 -0
  28. data/webcam/input/00019.png +0 -0
  29. data/webcam/input/00020.png +0 -0
  30. data/webcam/input/00021.png +0 -0
  31. data/webcam/input/00022.png +0 -0
  32. data/webcam/input/00023.png +0 -0
  33. data/webcam/input/00024.png +0 -0
  34. data/webcam/input/00025.png +0 -0
  35. data/webcam/input/00026.png +0 -0
  36. data/webcam/input/00027.png +0 -0
  37. data/webcam/input/00028.png +0 -0
  38. data/webcam/input/00029.png +0 -0
  39. data/webcam/input/00030.png +0 -0
  40. data/webcam/input/00031.png +0 -0
  41. data/webcam/input/00032.png +0 -0
  42. data/webcam/input/00033.png +0 -0
  43. data/webcam/input/00034.png +0 -0
  44. data/webcam/input/00035.png +0 -0
  45. data/webcam/input/00036.png +0 -0
  46. data/webcam/input/00037.png +0 -0
  47. data/webcam/input/00038.png +0 -0
  48. data/webcam/input/00039.png +0 -0
  49. data/webcam/input/00040.png +0 -0
  50. data/webcam/input/00041.png +0 -0
.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,10 +1,10 @@
  ---
- title: Bigdl Nano Demo
- emoji: 💩
- colorFrom: blue
- colorTo: pink
+ title: BigDL-Nano Demo
+ emoji: 🦄
+ colorFrom: yellow
+ colorTo: green
  sdk: gradio
- sdk_version: 3.0.20
+ sdk_version: 3.0.13
  app_file: app.py
  pinned: false
  ---
app.py ADDED
@@ -0,0 +1,199 @@
+ #
+ # Copyright 2016 The BigDL Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+ # Part of the code in this file is adapted from
+ # https://github.com/rnwzd/FSPBT-Image-Translation/blob/master/eval.py and
+ # https://github.com/rnwzd/FSPBT-Image-Translation/blob/master/train.py
+
+ # MIT License
+
+ # Copyright (c) 2022 Lorenzo Breschi
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ import gradio as gr
+ import numpy as np
+ import time
+ import datetime
+ from pathlib import Path
+
+ import torch
+ from torch.utils.data import DataLoader, Dataset
+ from tqdm import tqdm
+ from PIL import Image
+ import huggingface_hub
+
+ from bigdl.nano.pytorch.trainer import Trainer
+ from data import PatchDataModule, prepare_data, image2tensor, tensor2image
+
+
+ device = 'cpu'
+ dtype = torch.float32
+ MODEL_REPO = 'CVPR/FSPBT'
+ ckpt_path = huggingface_hub.hf_hub_download(
+     MODEL_REPO, 'generator.pt')
+ generator = torch.load(ckpt_path)
+ generator.eval()
+ generator.to(device, dtype)
+ params = {'batch_size': 1,
+           'num_workers': 0}
+
+
+ class ImageDataset(Dataset):
+     """Wraps a single image array as a one-element dataset."""
+
+     def __init__(self, img):
+         self.imgs = [image2tensor(img)]
+
+     def __getitem__(self, idx: int) -> torch.Tensor:
+         return self.imgs[idx]
+
+     def __len__(self) -> int:
+         return len(self.imgs)
+
+
+ data_path = Path('data/webcam')
+ train_image_dd = prepare_data(data_path)
+ dm = PatchDataModule(train_image_dd, patch_size=2**6,
+                      batch_size=2**3, patch_num=2**6)
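+ # PatchDataModule cuts the webcam frames into 64x64 patches (patch_size=2**6),
+ # served in batches of 8 (batch_size=2**3) with at most 64 patches per pass
+ # (patch_num=2**6); below, these patches are used only as calibration data
+ # for quantization.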
+
+ # quantize model
+ train_loader = dm.train_dataloader()
+ quantized_model = Trainer.quantize(generator, accelerator=None,
+                                    calib_dataloader=train_loader)
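+ # Trainer.quantize performs post-training quantization: the calibration
+ # batches from calib_dataloader are run through the FP32 generator to collect
+ # activation statistics, and a lower-precision model with the same call
+ # signature is returned; accelerator=None keeps the default PyTorch backend
+ # (rather than ONNXRuntime or OpenVINO).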
+
+
+ def original_transfer(input_img):
+     h, w, _ = input_img.shape  # gradio passes an (H, W, C) uint8 array
+     print(datetime.datetime.now())
+     print("input size: ", w, h)
+     # downscale overly large images
+     if w > 3000 or h > 3000:
+         ratio = min(3000 / w, 3000 / h)
+         w = int(w * ratio)
+         h = int(h * ratio)
+         input_img = np.array(Image.fromarray(input_img).resize((w, h)))
+     # crop so both dimensions are multiples of 4, as the generator requires
+     if w % 4 != 0 or h % 4 != 0:
+         w = int((w // 4) * 4)
+         h = int((h // 4) * 4)
+         input_img = input_img[:h, :w, :]
+     dataset = ImageDataset(input_img)
+     loader = DataLoader(dataset, **params)
+     with torch.no_grad():
+         for inputs in tqdm(loader):
+             inputs = inputs.to(device, dtype)
+             st = time.perf_counter()
+             outputs = generator(inputs)
+             ori_time = time.perf_counter() - st
+     ori_time = "{:.3f}s".format(ori_time)
+     ori_image = np.array(tensor2image(outputs[0]))
+     del inputs
+     del outputs
+     return ori_image, ori_time
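+ # nano_transfer below mirrors original_transfer exactly, except that it runs
+ # the quantized model instead of the FP32 generator, so the two reported
+ # latencies are directly comparable.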
+
+
+ def nano_transfer(input_img):
+     h, w, _ = input_img.shape  # gradio passes an (H, W, C) uint8 array
+     print(datetime.datetime.now())
+     print("input size: ", w, h)
+     # downscale overly large images
+     if w > 3000 or h > 3000:
+         ratio = min(3000 / w, 3000 / h)
+         w = int(w * ratio)
+         h = int(h * ratio)
+         input_img = np.array(Image.fromarray(input_img).resize((w, h)))
+     # crop so both dimensions are multiples of 4, as the generator requires
+     if w % 4 != 0 or h % 4 != 0:
+         w = int((w // 4) * 4)
+         h = int((h // 4) * 4)
+         input_img = input_img[:h, :w, :]
+     dataset = ImageDataset(input_img)
+     loader = DataLoader(dataset, **params)
+     with torch.no_grad():
+         for inputs in tqdm(loader):
+             inputs = inputs.to(device, dtype)
+             st = time.perf_counter()
+             outputs = quantized_model(inputs)
+             nano_time = time.perf_counter() - st
+     nano_time = "{:.3f}s".format(nano_time)
+     nano_image = np.array(tensor2image(outputs[0]))
+     del inputs
+     del outputs
+     return nano_image, nano_time
+
+
+ def clear():
+     return None, None, None, None
+
+
+ demo = gr.Blocks()
+
+ with demo:
+     gr.Markdown("<h1><center>BigDL-Nano inference demo</center></h1>")
+     with gr.Row().style(equal_height=False):
+         with gr.Column():
+             gr.Markdown('''
+ <h2>Overview</h2>
+
+ BigDL-Nano is a library in [BigDL 2.0](https://github.com/intel-analytics/BigDL) that allows users to transparently accelerate their deep learning pipelines (including data processing, training and inference) by automatically integrating optimized libraries, best-known configurations, and software optimizations.
+ The animation on the right shows how the user can enable accelerated training with BigDL-Nano through just one line of change.
+ ''')
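+         # A sketch of the "one line of change" mentioned above: in a standard
+         # PyTorch Lightning script, the LightningModule stays the same and only
+         # the Trainer import is swapped for BigDL-Nano's drop-in replacement
+         # (the same class imported at the top of this file), e.g.
+         #     from bigdl.nano.pytorch.trainer import Trainer  # instead of pytorch_lightning
+         #     trainer = Trainer(max_epochs=10)
+         # (max_epochs here is illustrative, not taken from this demo.)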
+         with gr.Column():
+             gr.Video(value="data/training_api.mp4")
+     gr.Markdown('''
+ The animations below show how the user can enable acceleration and quantization using BigDL-Nano with just a couple of lines of code; you may refer to our [CVPR 2022 demo paper](https://arxiv.org/abs/2204.01715) for more details.
+ ''')
+     with gr.Row().style(equal_height=True):
+         with gr.Column():
+             gr.Video(value="data/openvino_api.mp4")
+         with gr.Column():
+             gr.Video(value="data/quantize_ort_api.mp4")
+     gr.Markdown('''
+ <h2>Demo</h2>
+
+ In this section we show an inference demo, using an image stylization example to demonstrate the speedup of the above code when using quantization in BigDL-Nano (about 2~3x faster inference).
+ This inference demo is adapted from the original [FSPBT-Image-Translation code](https://github.com/rnwzd/FSPBT-Image-Translation),
+ and the default image is from [the COCO dataset](https://cocodataset.org/#home).
+ ''')
+     with gr.Row().style(equal_height=False):
+         input_img = gr.Image(label="input image", value="data/COCO_image.jpg", source="upload")
+         with gr.Column():
+             ori_but = gr.Button("Standard PyTorch Lightning")
+             nano_but = gr.Button("BigDL-Nano")
+             clear_but = gr.Button("Clear Output")
+     with gr.Row().style(equal_height=False):
+         with gr.Column():
+             ori_time = gr.Text(label="Standard PyTorch Lightning latency")
+             ori_image = gr.Image(label="Standard PyTorch Lightning output image")
+         with gr.Column():
+             nano_time = gr.Text(label="BigDL-Nano latency")
+             nano_image = gr.Image(label="BigDL-Nano output image")
+
+     ori_but.click(original_transfer, inputs=input_img, outputs=[ori_image, ori_time])
+     nano_but.click(nano_transfer, inputs=input_img, outputs=[nano_image, nano_time])
+     clear_but.click(clear, inputs=None, outputs=[ori_image, ori_time, nano_image, nano_time])
+
+
+ demo.launch(share=True, enable_queue=True)
data.py ADDED
@@ -0,0 +1,233 @@
+ # This file is copied from https://github.com/rnwzd/FSPBT-Image-Translation/blob/master/data.py
+
+ # MIT License
+
+ # Copyright (c) 2022 Lorenzo Breschi
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ from typing import Callable, Dict, Tuple
+ from pathlib import Path
+ from collections.abc import Iterable
+
+ import torch
+ from torch.utils.data import Dataset
+ import torchvision.transforms.functional as F
+ from torchvision import transforms
+ import pytorch_lightning as pl
+
+ # image reading / writing
+ from PIL import Image
+
+
+ def read_image(filepath: Path, mode: str = None) -> Image.Image:
+     with open(filepath, 'rb') as file:
+         image = Image.open(file)
+         return image.convert(mode)
+
+
+ image2tensor = transforms.ToTensor()
+ tensor2image = transforms.ToPILImage()
+
+
+ def write_image(image: Image.Image, filepath: Path):
+     filepath.parent.mkdir(parents=True, exist_ok=True)
+     image.save(str(filepath))
+
+
+ def read_image_tensor(filepath: Path, mode: str = 'RGB') -> torch.Tensor:
+     return image2tensor(read_image(filepath, mode))
+
+
+ def write_image_tensor(input: torch.Tensor, filepath: Path):
+     write_image(tensor2image(input), filepath)
+
+
+ def get_valid_indices(H: int, W: int, patch_size: int, random_overlap: int = 0):
+     # top-left corners of a patch grid, optionally jittered by random_overlap
+     vih = torch.arange(random_overlap, H - patch_size -
+                        random_overlap + 1, patch_size)
+     viw = torch.arange(random_overlap, W - patch_size -
+                        random_overlap + 1, patch_size)
+     if random_overlap > 0:
+         rih = torch.randint_like(vih, -random_overlap, random_overlap)
+         riw = torch.randint_like(viw, -random_overlap, random_overlap)
+         vih += rih
+         viw += riw
+     vi = torch.stack(torch.meshgrid(vih, viw)).view(2, -1).t()
+     return vi
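+ # For example, get_valid_indices(H=8, W=8, patch_size=4) yields
+ # vih = viw = [0, 4], so vi holds the four top-left corners
+ # (0, 0), (0, 4), (4, 0), (4, 4) of a non-overlapping 4x4 patch grid.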
+
+
+ def cut_patches(input: torch.Tensor, indices: Tuple[Tuple[int, int]], patch_size: int, padding: int = 0):
+     # TODO use slices to get all patches at the same time?
+     patches_l = []
+     for n in range(len(indices)):
+         patch = F.crop(input, *(indices[n] - padding),
+                        *(patch_size + padding * 2,) * 2)
+         patches_l.append(patch)
+     patches = torch.cat(patches_l, dim=0)
+     return patches
+
+
+ def prepare_data(data_path: Path, read_func: Callable = read_image_tensor) -> Dict:
+     """
+     Takes the path of a folder which contains subfolders (input, target, mask, etc.)
+     whose files are labelled with the same names.
+     :param data_path: path of the folder containing the data
+     :param read_func: function that reads a data file and returns a tensor
+     """
+     data_dict = {}
+
+     subdir_names = ["target", "input", "mask"]  # ,"helper"
+
+     # only consider files for which there is a target
+     # TODO check for images
+     name_ls = [file.name for file in (
+         data_path / "target").iterdir() if file.is_file()]
+
+     subdirs = [data_path / sdn for sdn in subdir_names]
+     for sd in subdirs:
+         if sd.is_dir():
+             data_ls = []
+             files = [sd / name for name in name_ls]
+             for file in files:
+                 tensor = read_func(file)
+                 H, W = tensor.shape[-2:]
+                 data_ls.append(tensor)
+             # TODO check that all sizes match
+             data_dict[sd.name] = torch.stack(data_ls, dim=0)
+
+     data_dict['name'] = name_ls
+     data_dict['len'] = len(data_dict['name'])
+     data_dict['H'] = H
+     data_dict['W'] = W
+     return data_dict
+
+
+ # TODO an image is loaded whenever a patch is needed, this may be a bottleneck
+ class DataDictLoader():
+     def __init__(self, data_dict: Dict,
+                  batch_size: int = 16,
+                  max_length: int = 128,
+                  shuffle: bool = False):
+         """Iterates over a data_dict in batches, slicing every iterable value."""
+         self.batch_size = batch_size
+         self.shuffle = shuffle
+
+         self.data_dict = data_dict
+         self.dataset_len = data_dict['len']
+         self.len = self.dataset_len if max_length is None else min(
+             self.dataset_len, max_length)
+         # calculate the number of batches
+         num_batches, remainder = divmod(self.len, self.batch_size)
+         if remainder > 0:
+             num_batches += 1
+         self.num_batches = num_batches
+
+     def __iter__(self):
+         if self.shuffle:
+             r = torch.randperm(self.dataset_len)
+             self.data_dict = {k: v[r] if isinstance(
+                 v, Iterable) else v for k, v in self.data_dict.items()}
+         self.i = 0
+         return self
+
+     def __next__(self):
+         if self.i >= self.len:
+             raise StopIteration
+         batch = {k: v[self.i:self.i + self.batch_size]
+                  if isinstance(v, Iterable) else v for k, v in self.data_dict.items()}
+         self.i += self.batch_size
+         return batch
+
+     def __len__(self):
+         return self.num_batches
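+ # Minimal usage sketch (parameter values are illustrative): given a data_dict
+ # as produced by prepare_data, iteration yields dict batches whose iterable
+ # values are sliced along the first axis:
+ #     loader = DataDictLoader(data_dict, batch_size=4, shuffle=True)
+ #     for batch in loader:
+ #         inputs, targets = batch['input'], batch['target']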
+
+
+ class PatchDataModule(pl.LightningDataModule):
+
+     def __init__(self, data_dict,
+                  patch_size: int = 2**5,
+                  batch_size: int = 2**4,
+                  patch_num: int = 2**6):
+         super().__init__()
+         self.data_dict = data_dict
+         self.H, self.W = data_dict['H'], data_dict['W']
+         self.len = data_dict['len']
+
+         self.batch_size = batch_size
+         self.patch_size = patch_size
+         self.patch_num = patch_num
+
+     def dataloader(self, data_dict, **kwargs):
+         return DataDictLoader(data_dict, **kwargs)
+
+     def train_dataloader(self):
+         patches = self.cut_patches()
+         return self.dataloader(patches, batch_size=self.batch_size, shuffle=True,
+                                max_length=self.patch_num)
+
+     def val_dataloader(self):
+         return self.dataloader(self.data_dict, batch_size=1)
+
+     def test_dataloader(self):
+         return self.dataloader(self.data_dict)  # TODO batch size
+
+     def cut_patches(self):
+         # TODO cycle once
+         patch_indices = get_valid_indices(
+             self.H, self.W, self.patch_size, self.patch_size // 4)
+         dd = {k: cut_patches(
+             v, patch_indices, self.patch_size) for k, v in self.data_dict.items()
+             if isinstance(v, torch.Tensor)
+         }
+         # keep only patches whose mean mask coverage exceeds the threshold
+         threshold = 0.1
+         mask_p = torch.mean(
+             dd.get('mask', torch.ones_like(dd['input'])), dim=(-1, -2, -3))
+         masked_idx = (mask_p > threshold).nonzero(as_tuple=True)[0]
+         dd = {k: v[masked_idx] for k, v in dd.items()}
+         dd['len'] = len(masked_idx)
+         dd['H'], dd['W'] = (self.patch_size,) * 2
+         return dd
+
+
+ class ImageDataset(Dataset):
+     def __init__(self, file_paths: Iterable, read_func: Callable = read_image_tensor):
+         self.file_paths = file_paths
+         self.read_func = read_func
+
+     def __getitem__(self, idx: int) -> Tuple[torch.Tensor, str]:
+         file = self.file_paths[idx]
+         return self.read_func(file), file.name
+
+     def __len__(self) -> int:
+         return len(self.file_paths)
data/COCO_image.jpg ADDED
data/openvino_api.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c56fec37521fb739fdffef383676ce27aa7687b0ea3c5322a4eac4e117f85823
+ size 3296276
data/quantize_ort_api.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25c17439b411d912fd7d5ad998c4dab21e07d991c2340f6192bc63265142fac1
+ size 2121679
data/training_api.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1411796bffeeeffb54b4a6d4d816e25e3d50d92fc06c602d7819823d0cd79b23
+ size 3614691
data/webcam/input/00000.png ADDED
data/webcam/input/00001.png ADDED
data/webcam/input/00002.png ADDED
data/webcam/input/00003.png ADDED
data/webcam/input/00004.png ADDED
data/webcam/input/00005.png ADDED
data/webcam/input/00006.png ADDED
data/webcam/input/00007.png ADDED
data/webcam/input/00008.png ADDED
data/webcam/input/00009.png ADDED
data/webcam/input/00010.png ADDED
data/webcam/input/00011.png ADDED
data/webcam/input/00012.png ADDED
data/webcam/input/00013.png ADDED
data/webcam/input/00014.png ADDED
data/webcam/input/00015.png ADDED
data/webcam/input/00016.png ADDED
data/webcam/input/00017.png ADDED
data/webcam/input/00018.png ADDED
data/webcam/input/00019.png ADDED
data/webcam/input/00020.png ADDED
data/webcam/input/00021.png ADDED
data/webcam/input/00022.png ADDED
data/webcam/input/00023.png ADDED
data/webcam/input/00024.png ADDED
data/webcam/input/00025.png ADDED
data/webcam/input/00026.png ADDED
data/webcam/input/00027.png ADDED
data/webcam/input/00028.png ADDED
data/webcam/input/00029.png ADDED
data/webcam/input/00030.png ADDED
data/webcam/input/00031.png ADDED
data/webcam/input/00032.png ADDED
data/webcam/input/00033.png ADDED
data/webcam/input/00034.png ADDED
data/webcam/input/00035.png ADDED
data/webcam/input/00036.png ADDED
data/webcam/input/00037.png ADDED
data/webcam/input/00038.png ADDED
data/webcam/input/00039.png ADDED
data/webcam/input/00040.png ADDED
data/webcam/input/00041.png ADDED