Ge Zheng committed
Commit 6f22dca · 1 Parent(s): 7d553c2

fix(data): fix bug in voc dataloader (#11)

README.md CHANGED
@@ -90,7 +90,6 @@ python tools/demo.py video -n yolox-s -c /path/to/your/yolox_s.pth.tar --path /p
 Step1. Prepare COCO dataset
 ```shell
 cd <YOLOX_HOME>
-mkdir datasets
 ln -s /path/to/your/COCO ./datasets/COCO
 ```
 
docs/.gitkeep DELETED
File without changes
docs/train_custom_data.md CHANGED
@@ -21,7 +21,6 @@ You can also write the Dataset by you own. Let's take the [VOC](../yolox/data/da
         img, target = self.preproc(img, target, self.input_dim)

         return img, target, img_info, img_id
-
 ```

 One more thing worth noting is that you should also implement "[pull_item](../yolox/data/datasets/voc.py#L129)" and "[load_anno](../yolox/data/datasets/voc.py#L121)" method for the Mosiac and MixUp augmentation.
@@ -29,6 +28,12 @@ One more thing worth noting is that you should also implement "[pull_item](../yo
 **Step 3** Prepare the evaluator. We currently have [COCO evaluator](../yolox/evaluators/coco_evaluator.py) and [VOC evaluator](../yolox/evaluators/voc_evaluator.py).
 If you have your own format data or evaluation metric, you may write your own evaluator.

+**Step 4** Put your dataset under $YOLOX_DIR/datasets$, for VOC:
+```shell
+ln -s /path/to/your/VOCdevkit ./datasets/VOCdevkit
+```
+* The path "VOCdevkit" will be used in your exp file described in next section.Specifically, in "get_data_loader" and "get_eval_loader" function.
+
 ## 2. Create your Exp file to control everything
 We put everything involved in a model to one single Exp file, including model setting, training setting, and testing setting.
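The new Step 4 symlinks the VOC data into the shared datasets directory, and the exp file consumes that path. For illustration only (not part of this commit), a quick way to confirm the path the exp will use resolves to the symlink; `get_yolox_datadir` is the helper the exp file imports in the next diff:

```python
import os
from yolox.data import get_yolox_datadir  # helper used by the VOC exp file below

# Illustrative sanity check, assuming the VOCdevkit symlink from Step 4 is in place.
voc_root = os.path.join(get_yolox_datadir(), "VOCdevkit")
print(voc_root)  # e.g. <YOLOX_HOME>/datasets/VOCdevkit
assert os.path.isdir(voc_root), f"expected the VOCdevkit symlink at {voc_root}"
```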
exps/example/yolox_voc/yolox_voc_s.py CHANGED
@@ -6,7 +6,7 @@ import torch.nn as nn
 import torch.distributed as dist

 from yolox.exp import Exp as MyExp
-
+from yolox.data import get_yolox_datadir

 class Exp(MyExp):
     def __init__(self):
@@ -14,7 +14,6 @@ class Exp(MyExp):
         self.num_classes = 20
         self.depth = 0.33
         self.width = 0.50
-        self.eval_interval = 2
         self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]

     def get_data_loader(self, batch_size, is_distributed, no_aug=False):
@@ -28,7 +27,7 @@ class Exp(MyExp):
         )

         dataset = VOCDetection(
-            data_dir='/data/Datasets/VOCdevkit',
+            data_dir=os.path.join(get_yolox_datadir(), "VOCdevkit"),
             image_sets=[('2007', 'trainval'), ('2012', 'trainval')],
             img_size=self.input_size,
             preproc=TrainTransform(
@@ -83,7 +82,7 @@
         from yolox.data import VOCDetection, ValTransform

         valdataset = VOCDetection(
-            data_dir='/data/Datasets/VOCdevkit',
+            data_dir=os.path.join(get_yolox_datadir(), "VOCdevkit"),
             image_sets=[('2007', 'test')],
             img_size=self.test_size,
             preproc=ValTransform(
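The substantive change here replaces the hardcoded `/data/Datasets/VOCdevkit` path with one derived from `get_yolox_datadir()`, so the exp works with the `datasets/VOCdevkit` symlink described in the docs. The helper lives in `yolox.data`; below is a minimal sketch of how such a helper could resolve the shared datasets directory. The `YOLOX_DATADIR` environment variable and the fallback to `<YOLOX_HOME>/datasets` are assumptions for illustration, not a quote of the actual implementation:

```python
import os

def get_yolox_datadir_sketch() -> str:
    """Illustrative stand-in for yolox.data.get_yolox_datadir (assumed behavior)."""
    # Assumption: an environment variable can override the default location.
    datadir = os.getenv("YOLOX_DATADIR")
    if datadir is None:
        # Assumption: fall back to the datasets/ folder next to the yolox package,
        # i.e. the <YOLOX_HOME>/datasets directory the README symlinks into.
        import yolox
        yolox_home = os.path.dirname(os.path.dirname(os.path.abspath(yolox.__file__)))
        datadir = os.path.join(yolox_home, "datasets")
    return datadir

# With the VOCdevkit symlink in place, the exp file's data_dir becomes
# os.path.join(get_yolox_datadir(), "VOCdevkit") -> <YOLOX_HOME>/datasets/VOCdevkit
```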
yolox/data/datasets/__init__.py CHANGED
@@ -6,3 +6,4 @@ from .coco import COCODataset
 from .coco_classes import COCO_CLASSES
 from .datasets_wrapper import ConcatDataset, Dataset, MixConcatDataset
 from .mosaicdetection import MosaicDetection
+from .voc import VOCDetection
yolox/evaluators/__init__.py CHANGED
@@ -3,3 +3,4 @@
 # Copyright (c) Megvii, Inc. and its affiliates.

 from .coco_evaluator import COCOEvaluator
+from .voc_evaluator import VOCEvaluator
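Taken together, the two `__init__.py` changes re-export the VOC dataset and evaluator at the package level. A small usage sketch of the imports this enables:

```python
# Usage sketch: package-level imports made available by this commit.
from yolox.data.datasets import VOCDetection
from yolox.evaluators import VOCEvaluator

print(VOCDetection.__name__, VOCEvaluator.__name__)
```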