def motp_custom(df: DataFrame, num_detections: float) -> float:
    """
    Multiple object tracker precision.
    Based on py-motmetrics' motp function.
    Additionally we check whether there are any detections.
    :param df: Motmetrics dataframe that is required, but not used here.
    :param num_detections: The number of detections.
    :return: The MOTP, or NaN if there are no detections.
    """
    # Guard against division by zero when there are no detections.
    if num_detections == 0:
        return np.nan
    return df.noraw['D'].sum() / num_detections

def faf(df: DataFrame, num_false_positives: float, num_frames: float) -> float:
    """
    The average number of false alarms per frame.
    :param df: Motmetrics dataframe that is required, but not used here.
    :param num_false_positives: The number of false positives.
    :param num_frames: The number of frames.
    :return: Average FAF, reported per 100 frames.
    """
    return num_false_positives / num_frames * 100

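# A hypothetical usage sketch (not part of the original module): both metrics only
# need scalar counts plus an object exposing a `noraw` events frame, so a
# SimpleNamespace stand-in with invented numbers is enough to exercise them.
def _demo_motp_and_faf() -> None:
    from types import SimpleNamespace
    toy_events = pd.DataFrame({'Type': ['MATCH', 'MATCH'], 'D': [0.4, 0.6]})
    assert motp_custom(SimpleNamespace(noraw=toy_events), num_detections=2) == 0.5
    assert np.isnan(motp_custom(SimpleNamespace(noraw=toy_events), num_detections=0))
    assert faf(None, num_false_positives=10.0, num_frames=50.0) == 20.0  # Per 100 frames.
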
def num_fragmentations_custom(df: DataFrame, obj_frequencies: DataFrame) -> float:
    """
    Total number of switches from tracked to not tracked.
    Based on py-motmetrics' num_fragmentations function.
    :param df: Motmetrics dataframe that is required, but not used here.
    :param obj_frequencies: Stores the GT tracking_ids and their frequencies.
    :return: The number of fragmentations.
    """
    fra = 0
    for o in obj_frequencies.index:
        # Find the span of the track, i.e. the first and last frame where it was not missed.
        dfo = df.noraw[df.noraw.OId == o]
        notmiss = dfo[dfo.Type != 'MISS']
        if len(notmiss) == 0:
            continue
        first = notmiss.index[0]
        last = notmiss.index[-1]

        # Count the transitions from tracked (0) to missed (1) within that span.
        diffs = dfo.loc[first:last].Type.apply(lambda x: 1 if x == 'MISS' else 0).diff()
        fra += diffs[diffs == 1].count()

    return fra

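# A minimal sketch (assumed, not from the original source) of how
# num_fragmentations_custom counts a track that is lost and re-acquired:
# MATCH, MISS, MATCH within the track span yields one 0 -> 1 transition.
def _demo_num_fragmentations() -> None:
    from types import SimpleNamespace
    toy = pd.DataFrame({'Type': ['MATCH', 'MISS', 'MATCH'], 'OId': ['obj1'] * 3})
    obj_frequencies = pd.Series({'obj1': 3})
    assert num_fragmentations_custom(SimpleNamespace(noraw=toy), obj_frequencies) == 1
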
class MOTAccumulatorCustom(motmetrics.mot.MOTAccumulator):
    def __init__(self):
        super().__init__()

    @staticmethod
    def new_event_dataframe_with_data(indices, events):
        """
        Create a new DataFrame filled with data.
        This version overwrites the original in MOTAccumulator and achieves about 2x speedup.

        Params
        ------
        indices: list
            list of tuples (frameid, eventid)
        events: list
            list of events where each event is a list containing
            'Type', 'OId', 'HId', 'D'
        """
        idx = pd.MultiIndex.from_tuples(indices, names=['FrameId', 'Event'])
        df = pd.DataFrame(events, index=idx, columns=['Type', 'OId', 'HId', 'D'])
        return df

    @staticmethod
    def new_event_dataframe():
        """ Create a new DataFrame for event tracking. """
        idx = pd.MultiIndex(levels=[[], []], codes=[[], []], names=['FrameId', 'Event'])
        cats = pd.Categorical([], categories=['RAW', 'FP', 'MISS', 'SWITCH', 'MATCH'])
        df = pd.DataFrame(OrderedDict([
            ('Type', pd.Series(cats)),          # Type of event: RAW, FP, MISS, SWITCH or MATCH.
            ('OId', pd.Series(dtype=object)),   # Object id, or NaN if the event has no GT object.
            ('HId', pd.Series(dtype=object)),   # Hypothesis id, or NaN if the event has no hypothesis.
            ('D', pd.Series(dtype=float))       # Distance, or NaN when undefined.
        ]), index=idx)
        return df

    @property
    def events(self):
        if self.dirty_events:
            self.cached_events_df = MOTAccumulatorCustom.new_event_dataframe_with_data(self._indices, self._events)
            self.dirty_events = False
        return self.cached_events_df

    @staticmethod
    def merge_event_dataframes(dfs, update_frame_indices=True, update_oids=True, update_hids=True,
                               return_mappings=False):
        """Merge dataframes.

        Params
        ------
        dfs : list of pandas.DataFrame or MotAccumulator
            A list of event containers to merge.

        Kwargs
        ------
        update_frame_indices : boolean, optional
            Ensure that frame indices are unique in the merged container.
        update_oids : boolean, optional
            Ensure that object ids are unique in the merged container.
        update_hids : boolean, optional
            Ensure that hypothesis ids are unique in the merged container.
        return_mappings : boolean, optional
            Whether or not to return mapping information.

        Returns
        -------
        df : pandas.DataFrame
            Merged event data frame.
        """
        mapping_infos = []
        new_oid = count()
        new_hid = count()
        r = MOTAccumulatorCustom.new_event_dataframe()
        for df in dfs:
            if isinstance(df, MOTAccumulatorCustom):
                df = df.events
            copy = df.copy()
            infos = {}

            if update_frame_indices:
                # Offset frame indices so that frames of different sequences do not collide.
                next_frame_id = max(r.index.get_level_values(0).max() + 1,
                                    r.index.get_level_values(0).unique().shape[0])
                if np.isnan(next_frame_id):
                    next_frame_id = 0
                copy.index = copy.index.map(lambda x: (x[0] + next_frame_id, x[1]))
                infos['frame_offset'] = next_frame_id

            if update_oids:
                # Relabel object ids with fresh, globally unique ids.
                oid_map = dict([oid, str(next(new_oid))] for oid in copy['OId'].dropna().unique())
                copy['OId'] = copy['OId'].map(lambda x: oid_map[x], na_action='ignore')
                infos['oid_map'] = oid_map

            if update_hids:
                # Relabel hypothesis ids with fresh, globally unique ids.
                hid_map = dict([hid, str(next(new_hid))] for hid in copy['HId'].dropna().unique())
                copy['HId'] = copy['HId'].map(lambda x: hid_map[x], na_action='ignore')
                infos['hid_map'] = hid_map

            # DataFrame.append was removed in pandas 2.0; concat is the equivalent.
            r = pd.concat([r, copy])
            mapping_infos.append(infos)

        if return_mappings:
            return r, mapping_infos
        else:
            return r

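# A hypothetical usage sketch (names invented), assuming the py-motmetrics
# update(oids, hids, dists, frameid=...) API: merge per-scene accumulators into
# one event frame, with ids remapped to stay globally unique.
def _demo_merge_accumulators() -> None:
    acc1 = MOTAccumulatorCustom()
    acc1.update(['gt_a'], ['hyp_1'], [[0.5]], frameid=0)
    acc2 = MOTAccumulatorCustom()
    acc2.update(['gt_b'], ['hyp_2'], [[0.7]], frameid=0)
    merged, mappings = MOTAccumulatorCustom.merge_event_dataframes([acc1, acc2], return_mappings=True)
    print(merged)       # One event frame with offset frame ids.
    print(mappings[1])  # {'frame_offset': ..., 'oid_map': ..., 'hid_map': ...}
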
def summary_plot(cfg: TrackingConfig, md_list: TrackingMetricDataList, ncols: int = 2,
                 savepath: str = None) -> None:
    """
    Creates a summary plot which includes all traditional metrics for each class.
    :param cfg: A TrackingConfig object.
    :param md_list: TrackingMetricDataList instance.
    :param ncols: How many columns the resulting plot should have.
    :param savepath: If given, saves the rendering here instead of displaying.
    """
    # Select metrics and setup plot.
    rel_metrics = ['motar', 'motp', 'mota', 'recall', 'mt', 'ml', 'faf', 'tp', 'fp', 'fn', 'ids', 'frag',
                   'tid', 'lgd']
    n_metrics = len(rel_metrics)
    nrows = int(np.ceil(n_metrics / ncols))
    _, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7.5 * ncols, 5 * nrows))

    # For each metric, plot the recall vs. metric curve.
    for ind, metric_name in enumerate(rel_metrics):
        row = ind // ncols
        col = np.mod(ind, ncols)
        recall_metric_curve(cfg, md_list, metric_name, ax=axes[row, col])

    plt.tight_layout()
    if savepath is not None:
        plt.savefig(savepath)
        plt.close()

def recall_metric_curve(cfg: TrackingConfig, md_list: TrackingMetricDataList, metric_name: str,
                        savepath: str = None, ax: Axis = None) -> None:
    """
    Plot the recall versus metric curve for the given metric.
    :param cfg: A TrackingConfig object.
    :param md_list: TrackingMetricDataList instance.
    :param metric_name: The name of the metric to plot.
    :param savepath: If given, saves the rendering here instead of displaying.
    :param ax: Axes onto which to render, or None to create a new axis.
    """
    min_recall = cfg.min_recall
    if ax is None:
        _, ax = plt.subplots(1, 1, figsize=(7.5, 5))
    ax = setup_axis(xlabel='Recall', ylabel=metric_name.upper(), xlim=1, ylim=None,
                    min_recall=min_recall, ax=ax, show_spines='bottomleft')

    # Plot each class on the same axis.
    for tracking_name, md in md_list.md.items():
        confidence = md.confidence
        recalls = md.recall_hypo
        values = md.get_metric(metric_name)

        # Skip classes without any valid threshold; otherwise clip to the first valid one.
        valid = np.where(np.logical_not(np.isnan(confidence)))[0]
        if len(valid) == 0:
            continue
        first_valid = valid[0]
        assert not np.isnan(confidence[-1])
        recalls = recalls[first_valid:]
        values = values[first_valid:]

        ax.plot(recalls, values, label='%s' % cfg.pretty_tracking_names[tracking_name],
                color=cfg.tracking_colors[tracking_name])

    # Use a symlog scale for unbounded count metrics and clip bounded metrics to [0, 1].
    if metric_name in ['mt', 'ml', 'faf', 'tp', 'fp', 'fn', 'ids', 'frag']:
        ax.set_yscale('symlog')
    if metric_name in ['amota', 'motar', 'recall', 'mota']:
        ax.set_ylim(0, 1)
    elif metric_name != 'motp':
        ax.set_ylim(bottom=0)

    ax.legend(loc='upper right', borderaxespad=0)
    plt.tight_layout()
    if savepath is not None:
        plt.savefig(savepath)
        plt.close()

class TrackingRenderer:
    """
    Class that renders the tracking results in BEV and saves them to a folder.
    """
    def __init__(self, save_path):
        """
        :param save_path: Output path to save the renderings.
        """
        self.save_path = save_path
        self.id2color = {}  # The color of each tracking id.

    def render(self, events: DataFrame, timestamp: int, frame_gt: List[TrackingBox],
               frame_pred: List[TrackingBox]) -> None:
        """
        Render function for a given scene timestamp.
        :param events: Motmetrics events for that particular timestamp.
        :param timestamp: Timestamp for the rendering.
        :param frame_gt: List of ground truth boxes.
        :param frame_pred: List of prediction boxes.
        """
        print('Rendering {}'.format(timestamp))
        switches = events[events.Type == 'SWITCH']
        switch_ids = switches.HId.values
        fig, ax = plt.subplots()

        # Plot GT boxes in black.
        for b in frame_gt:
            color = 'k'
            box = Box(b.ego_translation, b.size, Quaternion(b.rotation), name=b.tracking_name,
                      token=b.tracking_id)
            box.render(ax, view=np.eye(4), colors=(color, color, color), linewidth=1)

        # Plot predicted boxes.
        for b in frame_pred:
            box = Box(b.ego_translation, b.size, Quaternion(b.rotation), name=b.tracking_name,
                      token=b.tracking_id)

            # Determine a color for this tracking id. Note that hash() is seeded per process,
            # so these colors are only stable within a single run.
            if b.tracking_id not in self.id2color.keys():
                self.id2color[b.tracking_id] = (float(hash(b.tracking_id + 'r') % 256) / 255,
                                                float(hash(b.tracking_id + 'g') % 256) / 255,
                                                float(hash(b.tracking_id + 'b') % 256) / 255)

            # Render the box, highlighting identity switches in red.
            if b.tracking_id in switch_ids:
                color = self.id2color[b.tracking_id]
                box.render(ax, view=np.eye(4), colors=('r', 'r', color))
            else:
                color = self.id2color[b.tracking_id]
                box.render(ax, view=np.eye(4), colors=(color, color, color))

        # Plot the ego pose and save to disk.
        plt.scatter(0, 0, s=96, facecolors='none', edgecolors='k', marker='o')
        plt.xlim(-50, 50)
        plt.ylim(-50, 50)
        fig.savefig(os.path.join(self.save_path, '{}.png'.format(timestamp)))
        plt.close(fig)

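# Side note (an assumption, not from the original code): because Python's str hash
# is randomized per process, a deterministic alternative for colors that are stable
# across runs could hash the id with hashlib instead, e.g.:
def _stable_id_color(tracking_id: str):
    import hashlib
    digest = hashlib.md5(tracking_id.encode()).digest()
    return digest[0] / 255, digest[1] / 255, digest[2] / 255
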
class TestAlgo(unittest.TestCase):

    @staticmethod
    def single_scene() -> Tuple[str, Dict[str, Dict[int, List[TrackingBox]]]]:
        class_name = 'car'
        box = TrackingBox(translation=(0, 0, 0), tracking_id='ta', tracking_name=class_name,
                          tracking_score=0.5)
        timestamp_boxes_gt = {
            0: [copy.deepcopy(box)],
            1: [copy.deepcopy(box)],
            2: [copy.deepcopy(box)],
            3: [copy.deepcopy(box)]
        }
        timestamp_boxes_gt[0][0].sample_token = 'a'
        timestamp_boxes_gt[1][0].sample_token = 'b'
        timestamp_boxes_gt[2][0].sample_token = 'c'
        timestamp_boxes_gt[3][0].sample_token = 'd'
        tracks_gt = {'scene-1': timestamp_boxes_gt}
        return class_name, tracks_gt

    def test_gt_submission(self):
        """ Test with GT submission. """
        cfg = config_factory('tracking_nips_2019')

        # Define inputs: the submission is identical to the GT.
        class_name, tracks_gt = TestAlgo.single_scene()
        verbose = False
        timestamp_boxes_pred = copy.deepcopy(tracks_gt['scene-1'])
        tracks_pred = {'scene-1': timestamp_boxes_pred}

        # Accumulate metrics.
        ev = TrackingEvaluation(tracks_gt, tracks_pred, class_name, cfg.dist_fcn_callable,
                                cfg.dist_th_tp, cfg.min_recall, num_thresholds=TrackingMetricData.nelem,
                                metric_worst=cfg.metric_worst, verbose=verbose)
        md = ev.accumulate()

        # Check outputs: a perfect submission has no errors.
        assert np.all(md.tp == 4)
        assert np.all(md.fn == 0)
        assert np.all(md.fp == 0)
        assert np.all(md.lgd == 0)
        assert np.all(md.tid == 0)
        assert np.all(md.frag == 0)
        assert np.all(md.ids == 0)

    def test_empty_submission(self):
        """ Test a submission with no predictions. """
        cfg = config_factory('tracking_nips_2019')

        # Define inputs: drop all predictions.
        class_name, tracks_gt = TestAlgo.single_scene()
        verbose = False
        timestamp_boxes_pred = copy.deepcopy(tracks_gt['scene-1'])
        for timestamp in timestamp_boxes_pred:
            timestamp_boxes_pred[timestamp] = []
        tracks_pred = {'scene-1': timestamp_boxes_pred}

        ev = TrackingEvaluation(tracks_gt, tracks_pred, class_name, cfg.dist_fcn_callable,
                                cfg.dist_th_tp, cfg.min_recall, num_thresholds=TrackingMetricData.nelem,
                                metric_worst=cfg.metric_worst, verbose=verbose)
        md = ev.accumulate()

        # Check outputs: without predictions, most metrics are at their worst or undefined.
        assert np.all(md.mota == 0)
        assert np.all(md.motar == 0)
        assert np.all(np.isnan(md.recall_hypo))
        assert np.all(md.tp == 0)
        assert np.all(md.fn == 4)
        assert np.all(np.isnan(md.fp))
        assert np.all(md.lgd == 20)
        assert np.all(md.tid == 20)
        assert np.all(np.isnan(md.frag))
        assert np.all(np.isnan(md.ids))

    def test_drop_prediction(self):
        """ Drop one prediction from the GT submission. """
        cfg = config_factory('tracking_nips_2019')

        # Define inputs: drop the prediction at timestamp 1.
        class_name, tracks_gt = TestAlgo.single_scene()
        verbose = False
        timestamp_boxes_pred = copy.deepcopy(tracks_gt['scene-1'])
        timestamp_boxes_pred[1] = []
        tracks_pred = {'scene-1': timestamp_boxes_pred}

        ev = TrackingEvaluation(tracks_gt, tracks_pred, class_name, cfg.dist_fcn_callable,
                                cfg.dist_th_tp, cfg.min_recall, num_thresholds=TrackingMetricData.nelem,
                                metric_worst=cfg.metric_worst, verbose=verbose)
        md = ev.accumulate()

        # Check outputs: one miss, one fragmentation and a non-zero LGD.
        first_achieved = np.where(md.recall_hypo <= 0.75)[0][0]
        assert np.all(np.isnan(md.confidence[:first_achieved]))
        assert md.tp[first_achieved] == 3
        assert md.fp[first_achieved] == 0
        assert md.fn[first_achieved] == 1
        assert md.lgd[first_achieved] == 0.5
        assert md.tid[first_achieved] == 0
        assert md.frag[first_achieved] == 1
        assert md.ids[first_achieved] == 0

    def test_drop_prediction_multiple(self):
        """ Drop the first three predictions from the GT submission. """
        cfg = config_factory('tracking_nips_2019')

        # Define inputs: drop the predictions at timestamps 0, 1 and 2.
        class_name, tracks_gt = TestAlgo.single_scene()
        verbose = False
        timestamp_boxes_pred = copy.deepcopy(tracks_gt['scene-1'])
        timestamp_boxes_pred[0] = []
        timestamp_boxes_pred[1] = []
        timestamp_boxes_pred[2] = []
        tracks_pred = {'scene-1': timestamp_boxes_pred}

        ev = TrackingEvaluation(tracks_gt, tracks_pred, class_name, cfg.dist_fcn_callable,
                                cfg.dist_th_tp, cfg.min_recall, num_thresholds=TrackingMetricData.nelem,
                                metric_worst=cfg.metric_worst, verbose=verbose)
        md = ev.accumulate()

        # Check outputs: only the last box is matched; TID and LGD span the three missed frames.
        first_achieved = np.where(md.recall_hypo <= 0.25)[0][0]
        assert np.all(np.isnan(md.confidence[:first_achieved]))
        assert md.tp[first_achieved] == 1
        assert md.fp[first_achieved] == 0
        assert md.fn[first_achieved] == 3
        assert md.lgd[first_achieved] == 3 * 0.5
        assert md.tid[first_achieved] == 3 * 0.5
        assert md.frag[first_achieved] == 0
        assert md.ids[first_achieved] == 0

    def test_identity_switch(self):
        """ Change the tracking_id of one frame from the GT submission. """
        cfg = config_factory('tracking_nips_2019')

        # Define inputs: change one tracking id.
        class_name, tracks_gt = TestAlgo.single_scene()
        verbose = False
        timestamp_boxes_pred = copy.deepcopy(tracks_gt['scene-1'])
        timestamp_boxes_pred[2][0].tracking_id = 'tb'
        tracks_pred = {'scene-1': timestamp_boxes_pred}

        ev = TrackingEvaluation(tracks_gt, tracks_pred, class_name, cfg.dist_fcn_callable,
                                cfg.dist_th_tp, cfg.min_recall, num_thresholds=TrackingMetricData.nelem,
                                metric_worst=cfg.metric_worst, verbose=verbose)
        md = ev.accumulate()

        first_achieved = np.where(md.recall_hypo <= 0.5)[0][0]
        assert md.tp[first_achieved] == 2
        assert md.fp[first_achieved] == 0
        assert md.fn[first_achieved] == 0
        assert md.lgd[first_achieved] == 0
        assert md.tid[first_achieved] == 0
        assert md.frag[first_achieved] == 0
        assert md.ids[first_achieved] == 2  # One switch into and one out of the new id.

    def test_drop_gt(self):
        """ Drop one box from the GT. """
        cfg = config_factory('tracking_nips_2019')

        # Define inputs: remove the GT box at timestamp 1, so one prediction becomes a FP.
        class_name, tracks_gt = TestAlgo.single_scene()
        verbose = False
        timestamp_boxes_pred = copy.deepcopy(tracks_gt['scene-1'])
        tracks_gt['scene-1'][1] = []
        tracks_pred = {'scene-1': timestamp_boxes_pred}

        ev = TrackingEvaluation(tracks_gt, tracks_pred, class_name, cfg.dist_fcn_callable,
                                cfg.dist_th_tp, cfg.min_recall, num_thresholds=TrackingMetricData.nelem,
                                metric_worst=cfg.metric_worst, verbose=verbose)
        md = ev.accumulate()

        assert np.all(md.tp == 3)
        assert np.all(md.fp == 1)
        assert np.all(md.fn == 0)
        assert np.all(md.lgd == 0.5)
        assert np.all(md.tid == 0)
        assert np.all(md.frag == 0)
        assert np.all(md.ids == 0)

    def test_drop_gt_interpolate(self):
        """ Drop one box from the GT and interpolate the results to fill in that box. """
        cfg = config_factory('tracking_nips_2019')

        # Define inputs: drop one GT box, then recover it via interpolation.
        class_name, tracks_gt = TestAlgo.single_scene()
        verbose = False
        timestamp_boxes_pred = copy.deepcopy(tracks_gt['scene-1'])
        tracks_gt['scene-1'][1] = []
        tracks_pred = {'scene-1': timestamp_boxes_pred}

        # Interpolate GT.
        tracks_gt['scene-1'] = interpolate_tracks(defaultdict(list, tracks_gt['scene-1']))

        ev = TrackingEvaluation(tracks_gt, tracks_pred, class_name, cfg.dist_fcn_callable,
                                cfg.dist_th_tp, cfg.min_recall, num_thresholds=TrackingMetricData.nelem,
                                metric_worst=cfg.metric_worst, verbose=verbose)
        md = ev.accumulate()

        # Check outputs: the interpolated GT restores a perfect score.
        assert np.all(md.tp == 4)
        assert np.all(md.fp == 0)
        assert np.all(md.fn == 0)
        assert np.all(md.lgd == 0)
        assert np.all(md.tid == 0)
        assert np.all(md.frag == 0)
        assert np.all(md.ids == 0)

    def test_scenarios(self):
        """ More flexible scenario test structure. """
        def create_tracks(_scenario, tag=None):
            tracks = {}
            for entry_id, entry in enumerate(_scenario['input']['pos_' + tag]):
                tracking_id = 'tag_{}'.format(entry_id)
                for timestamp, pos in enumerate(entry):
                    if timestamp not in tracks.keys():
                        tracks[timestamp] = []
                    box = TrackingBox(translation=(pos[0], pos[1], 0.0), tracking_id=tracking_id,
                                      tracking_name='car', tracking_score=0.5)
                    tracks[timestamp].append(box)
            return tracks

        cfg = config_factory('tracking_nips_2019')

        for scenario in get_scenarios():
            tracks_gt = {'scene-1': create_tracks(scenario, tag='gt')}
            tracks_pred = {'scene-1': create_tracks(scenario, tag='pred')}

            ev = TrackingEvaluation(tracks_gt, tracks_pred, 'car', cfg.dist_fcn_callable,
                                    cfg.dist_th_tp, cfg.min_recall, num_thresholds=TrackingMetricData.nelem,
                                    metric_worst=cfg.metric_worst, verbose=False)
            md = ev.accumulate()

            # Check that all metrics match the expected outputs (ignoring NaN entries).
            for key, value in scenario['output'].items():
                metric_values = getattr(md, key)
                metric_values = metric_values[np.logical_not(np.isnan(metric_values))]
                assert np.all(metric_values == value)

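# For reference, a hypothetical scenario dict in the shape that create_tracks and
# the assertions above expect (get_scenarios() provides the real ones): per-track
# position lists for GT and predictions as input, expected metric values as output.
_example_scenario = {
    'input': {
        'pos_gt': [[(0.0, 0.0), (0.0, 1.0)]],    # One GT track over two timestamps.
        'pos_pred': [[(0.0, 0.0), (0.0, 1.0)]]   # One matching predicted track.
    },
    'output': {'ids': 0, 'fp': 0}                # Expected metric values.
}
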
class TestMain(unittest.TestCase):
    res_mockup = 'nusc_eval.json'
    res_eval_folder = 'tmp'

    def tearDown(self):
        if os.path.exists(self.res_mockup):
            os.remove(self.res_mockup)
        if os.path.exists(self.res_eval_folder):
            shutil.rmtree(self.res_eval_folder)

    @staticmethod
    def _mock_submission(nusc: NuScenes, split: str, add_errors: bool = False) -> Dict[str, dict]:
        """
        Creates "reasonable" submission (results and metadata) by looping through the mini-val set, adding 1 GT
        prediction per sample. Predictions will be permuted randomly along all axes.
        :param nusc: NuScenes instance.
        :param split: Dataset split to use.
        :param add_errors: Whether to use GT or add errors to it.
        """
        cfg = config_factory('tracking_nips_2019')

        def random_class(category_name: str, _add_errors: bool = False) -> Optional[str]:
            # Alter 10% of the valid labels.
            class_names = sorted(cfg.tracking_names)
            tmp = category_to_tracking_name(category_name)
            if tmp is None:
                return None
            elif not _add_errors or np.random.rand() < 0.9:
                return tmp
            else:
                return class_names[np.random.randint(0, len(class_names) - 1)]

        def random_id(instance_token: str, _add_errors: bool = False) -> str:
            # Alter 10% of the valid ids to a random string, which likely starts a new track.
            if not _add_errors or np.random.rand() < 0.9:
                _tracking_id = instance_token + '_pred'
            else:
                _tracking_id = str(np.random.randint(0, sys.maxsize))
            return _tracking_id

        mock_meta = {
            'use_camera': False,
            'use_lidar': True,
            'use_radar': False,
            'use_map': False,
            'use_external': False
        }
        mock_results = {}

        # Get all samples in the current evaluation split.
        splits = create_splits_scenes()
        val_samples = []
        for sample in nusc.sample:
            if nusc.get('scene', sample['scene_token'])['name'] in splits[split]:
                val_samples.append(sample)

        # Prepare results.
        instance_to_score = dict()
        for sample in tqdm(val_samples, leave=False):
            sample_res = []
            for ann_token in sample['anns']:
                ann = nusc.get('sample_annotation', ann_token)
                translation = np.array(ann['translation'])
                size = np.array(ann['size'])
                rotation = np.array(ann['rotation'])
                velocity = nusc.box_velocity(ann_token)[:2]
                tracking_id = random_id(ann['instance_token'], _add_errors=add_errors)
                tracking_name = random_class(ann['category_name'], _add_errors=add_errors)

                # Skip annotations without a tracking class or without any lidar/radar points.
                if tracking_name is None:
                    continue
                num_pts = ann['num_lidar_pts'] + ann['num_radar_pts']
                if num_pts == 0:
                    continue

                # Score each instance once, with a small random perturbation per sample.
                if ann['instance_token'] not in instance_to_score:
                    instance_to_score[ann['instance_token']] = random.random()
                tracking_score = instance_to_score[ann['instance_token']]
                tracking_score = np.clip(tracking_score + random.random() * 0.3, 0, 1)

                if add_errors:
                    translation += 4 * (np.random.rand(3) - 0.5)
                    size *= np.random.rand(3) + 0.5
                    rotation += (np.random.rand(4) - 0.5) * 0.1
                    velocity *= np.random.rand(3)[:2] + 0.5

                sample_res.append({
                    'sample_token': sample['token'],
                    'translation': list(translation),
                    'size': list(size),
                    'rotation': list(rotation),
                    'velocity': list(velocity),
                    'tracking_id': tracking_id,
                    'tracking_name': tracking_name,
                    'tracking_score': tracking_score
                })
            mock_results[sample['token']] = sample_res
        mock_submission = {
            'meta': mock_meta,
            'results': mock_results
        }
        return mock_submission

    @unittest.skip
    def basic_test(self, eval_set: str = 'mini_val', add_errors: bool = False,
                   render_curves: bool = False) -> Dict[str, Any]:
        """
        Run the evaluation with fixed randomness on the specified subset, with or without introducing errors in the
        submission.
        :param eval_set: Which split to evaluate on.
        :param add_errors: Whether to use GT as submission or introduce additional errors.
        :param render_curves: Whether to render stats curves to disk.
        :return: The metrics returned by the evaluation.
        """
        random.seed(42)
        np.random.seed(42)
        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'

        if eval_set.startswith('mini'):
            version = 'v1.0-mini'
        elif eval_set == 'test':
            version = 'v1.0-test'
        else:
            version = 'v1.0-trainval'
        nusc = NuScenes(version=version, dataroot=os.environ['NUSCENES'], verbose=False)

        with open(self.res_mockup, 'w') as f:
            mock = self._mock_submission(nusc, eval_set, add_errors=add_errors)
            json.dump(mock, f, indent=2)

        cfg = config_factory('tracking_nips_2019')
        nusc_eval = TrackingEval(cfg, self.res_mockup, eval_set=eval_set, output_dir=self.res_eval_folder,
                                 nusc_version=version, nusc_dataroot=os.environ['NUSCENES'], verbose=False)
        metrics = nusc_eval.main(render_curves=render_curves)

        return metrics

    @unittest.skip
    def test_delta_mock(self, eval_set: str = 'mini_val', render_curves: bool = False):
        """
        This test runs the evaluation for an arbitrary random set of predictions.
        The resulting score is captured in this very test, so that if we change the eval code,
        this test will fail if the results change.
        :param eval_set: Which set to evaluate on.
        :param render_curves: Whether to render stats curves to disk.
        """
        # Run the evaluation with errors.
        metrics = self.basic_test(eval_set, add_errors=True, render_curves=render_curves)

        # Compare metrics to known reference values.
        if eval_set == 'mini_val':
            self.assertAlmostEqual(metrics['amota'], 0.23766771095785147)
            self.assertAlmostEqual(metrics['amotp'], 1.5275400961369252)
            self.assertAlmostEqual(metrics['motar'], 0.3726570200013319)
            self.assertAlmostEqual(metrics['mota'], 0.25003943918566174)
            self.assertAlmostEqual(metrics['motp'], 1.2976508610883917)
        else:
            print('Skipping checks due to choice of custom eval_set: %s' % eval_set)

    @unittest.skip
    def test_delta_gt(self, eval_set: str = 'mini_val', render_curves: bool = False):
        """
        This test runs the evaluation with the ground truth used as predictions.
        This should result in a perfect score for every metric.
        The resulting score is captured in this very test, so that if we change the eval code,
        this test will fail if the results change.
        :param eval_set: Which set to evaluate on.
        :param render_curves: Whether to render stats curves to disk.
        """
        # Run the evaluation without errors.
        metrics = self.basic_test(eval_set, add_errors=False, render_curves=render_curves)

        # Compare metrics to the expected perfect scores.
        if eval_set == 'mini_val':
            self.assertAlmostEqual(metrics['amota'], 1.0)
            self.assertAlmostEqual(metrics['amotp'], 0.0, delta=1e-5)
            self.assertAlmostEqual(metrics['motar'], 1.0)
            self.assertAlmostEqual(metrics['recall'], 1.0)
            self.assertAlmostEqual(metrics['mota'], 1.0)
            self.assertAlmostEqual(metrics['motp'], 0.0, delta=1e-5)
            self.assertAlmostEqual(metrics['faf'], 0.0)
            self.assertAlmostEqual(metrics['ml'], 0.0)
            self.assertAlmostEqual(metrics['fp'], 0.0)
            self.assertAlmostEqual(metrics['fn'], 0.0)
            self.assertAlmostEqual(metrics['ids'], 0.0)
            self.assertAlmostEqual(metrics['frag'], 0.0)
            self.assertAlmostEqual(metrics['tid'], 0.0)
            self.assertAlmostEqual(metrics['lgd'], 0.0)
        else:
            print('Skipping checks due to choice of custom eval_set: %s' % eval_set)

def category_to_tracking_name(category_name: str) -> Optional[str]:
    """
    Default label mapping from nuScenes to nuScenes tracking classes.
    :param category_name: Generic nuScenes class.
    :return: nuScenes tracking class, or None if the class is not used for tracking.
    """
    tracking_mapping = {
        'vehicle.bicycle': 'bicycle',
        'vehicle.bus.bendy': 'bus',
        'vehicle.bus.rigid': 'bus',
        'vehicle.car': 'car',
        'vehicle.motorcycle': 'motorcycle',
        'human.pedestrian.adult': 'pedestrian',
        'human.pedestrian.child': 'pedestrian',
        'human.pedestrian.construction_worker': 'pedestrian',
        'human.pedestrian.police_officer': 'pedestrian',
        'vehicle.trailer': 'trailer',
        'vehicle.truck': 'truck'
    }
    if category_name in tracking_mapping:
        return tracking_mapping[category_name]
    else:
        return None

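# A small sanity check (not part of the original module): classes without a
# tracking equivalent map to None and are skipped downstream.
def _demo_category_to_tracking_name() -> None:
    assert category_to_tracking_name('vehicle.car') == 'car'
    assert category_to_tracking_name('human.pedestrian.adult') == 'pedestrian'
    assert category_to_tracking_name('animal') is None  # No tracking equivalent.
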
def metric_name_to_print_format(metric_name) -> str:
    """
    Get the standard print format (numerical precision) for each metric.
    :param metric_name: The lowercase metric name.
    :return: The print format.
    """
    if metric_name in ['amota', 'amotp', 'motar', 'recall', 'mota', 'motp']:
        print_format = '%.3f'
    elif metric_name in ['tid', 'lgd']:
        print_format = '%.2f'
    elif metric_name in ['faf']:
        print_format = '%.1f'
    else:
        print_format = '%d'
    return print_format

def print_final_metrics(metrics: TrackingMetrics) -> None:
    """
    Print metrics to stdout.
    :param metrics: The output of evaluate().
    """
    print('\n### Final results ###')

    # Print per-class metrics.
    metric_names = metrics.label_metrics.keys()
    print('\nPer-class results:')
    print('\t\t', end='')
    print('\t'.join([m.upper() for m in metric_names]))

    class_names = metrics.class_names
    max_name_length = 7
    for class_name in class_names:
        print_class_name = class_name[:max_name_length].ljust(max_name_length + 1)
        print('%s' % print_class_name, end='')
        for metric_name in metric_names:
            val = metrics.label_metrics[metric_name][class_name]
            print_format = '%f' if np.isnan(val) else metric_name_to_print_format(metric_name)
            print('\t%s' % (print_format % val), end='')
        print()

    # Print aggregated metrics.
    print('\nAggregated results:')
    for metric_name in metric_names:
        val = metrics.compute_metric(metric_name, 'all')
        print_format = metric_name_to_print_format(metric_name)
        print('%s\t%s' % (metric_name.upper(), print_format % val))
    print('Eval time: %.1fs' % metrics.eval_time)
    print()

def print_threshold_metrics(metrics: Dict[str, Dict[str, float]]) -> None:
    """
    Print only a subset of the metrics for the current class and threshold.
    :param metrics: A dictionary representation of the metrics.
    """
    # Retrieve the threshold name and the relevant metrics.
    assert len(metrics['mota_custom'].keys()) == 1
    threshold_str = list(metrics['mota_custom'].keys())[0]
    motar_val = metrics['motar'][threshold_str]
    motp = metrics['motp_custom'][threshold_str]
    recall = metrics['recall'][threshold_str]
    num_frames = metrics['num_frames'][threshold_str]
    num_objects = metrics['num_objects'][threshold_str]
    num_predictions = metrics['num_predictions'][threshold_str]
    num_false_positives = metrics['num_false_positives'][threshold_str]
    num_misses = metrics['num_misses'][threshold_str]
    num_switches = metrics['num_switches'][threshold_str]
    num_matches = metrics['num_matches'][threshold_str]

    # Print.
    print('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s'
          % ('\t', 'MOTAR', 'MOTP', 'Recall', 'Frames',
             'GT', 'GT-Mtch', 'GT-Miss', 'GT-IDS',
             'Pred', 'Pred-TP', 'Pred-FP', 'Pred-IDS'))
    print('%s\t%.3f\t%.3f\t%.3f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d'
          % (threshold_str, motar_val, motp, recall, num_frames,
             num_objects, num_matches, num_misses, num_switches,
             num_predictions, num_matches, num_false_positives, num_switches))
    print()

    # Sanity checks: GT and prediction counts must decompose consistently.
    assert num_objects == num_matches + num_misses + num_switches
    assert num_predictions == num_matches + num_false_positives + num_switches

def create_motmetrics() -> MetricsHost:
    """
    Creates a MetricsHost and populates it with default and custom metrics.
    It does not populate the global metrics which are more time consuming.
    :return: The initialized MetricsHost object with default MOT metrics.
    """
    mh = MetricsHost()

    # Register standard metrics, suppressing the deprecation warnings they currently trigger.
    warnings.filterwarnings('ignore', category=DeprecationWarning)
    fields = [
        'num_frames', 'obj_frequencies', 'num_matches', 'num_switches', 'num_false_positives',
        'num_misses', 'num_detections', 'num_objects', 'num_predictions', 'mostly_tracked',
        'mostly_lost', 'num_fragmentations', 'motp', 'mota', 'precision', 'recall', 'track_ratios'
    ]
    for field in fields:
        mh.register(getattr(motmetrics.metrics, field), formatter='{:d}'.format)
    warnings.filterwarnings('default', category=DeprecationWarning)

    # Register custom metrics.
    mh.register(motar, ['num_matches', 'num_misses', 'num_switches', 'num_false_positives', 'num_objects'],
                formatter='{:.2%}'.format, name='motar')
    mh.register(mota_custom, ['num_misses', 'num_switches', 'num_false_positives', 'num_objects'],
                formatter='{:.2%}'.format, name='mota_custom')
    mh.register(motp_custom, ['num_detections'], formatter='{:.2%}'.format, name='motp_custom')
    mh.register(num_fragmentations_custom, ['obj_frequencies'], formatter='{:.2%}'.format,
                name='num_fragmentations_custom')
    mh.register(faf, ['num_false_positives', 'num_frames'], formatter='{:.2%}'.format, name='faf')
    mh.register(track_initialization_duration, ['obj_frequencies'], formatter='{:.2%}'.format, name='tid')
    mh.register(longest_gap_duration, ['obj_frequencies'], formatter='{:.2%}'.format, name='lgd')

    return mh

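# A hypothetical end-to-end sketch (names invented), assuming py-motmetrics'
# MetricsHost.compute(acc, metrics=..., name=...) API: evaluate one accumulator
# with a subset of the registered metrics.
def _demo_create_motmetrics() -> None:
    mh = create_motmetrics()
    acc = MOTAccumulatorCustom()
    acc.update(['gt_a'], ['hyp_1'], [[0.5]], frameid=0)
    summary = mh.compute(acc, metrics=['num_frames', 'mota_custom', 'motp_custom'], name='thr_0.5')
    print(summary)
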
def truncate_class_name(class_name: str) -> str:
    """
    Truncate a given class name according to a pre-defined map.
    :param class_name: The long form (i.e. original form) of the class name.
    :return: The truncated form of the class name.
    """
    string_mapper = {
        'noise': 'noise',
        'human.pedestrian.adult': 'adult',
        'human.pedestrian.child': 'child',
        'human.pedestrian.wheelchair': 'wheelchair',
        'human.pedestrian.stroller': 'stroller',
        'human.pedestrian.personal_mobility': 'p.mobility',
        'human.pedestrian.police_officer': 'police',
        'human.pedestrian.construction_worker': 'worker',
        'animal': 'animal',
        'vehicle.car': 'car',
        'vehicle.motorcycle': 'motorcycle',
        'vehicle.bicycle': 'bicycle',
        'vehicle.bus.bendy': 'bus.bendy',
        'vehicle.bus.rigid': 'bus.rigid',
        'vehicle.truck': 'truck',
        'vehicle.construction': 'constr. veh',
        'vehicle.emergency.ambulance': 'ambulance',
        'vehicle.emergency.police': 'police car',
        'vehicle.trailer': 'trailer',
        'movable_object.barrier': 'barrier',
        'movable_object.trafficcone': 'trafficcone',
        'movable_object.pushable_pullable': 'push/pullable',
        'movable_object.debris': 'debris',
        'static_object.bicycle_rack': 'bicycle racks',
        'flat.driveable_surface': 'driveable',
        'flat.sidewalk': 'sidewalk',
        'flat.terrain': 'terrain',
        'flat.other': 'flat.other',
        'static.manmade': 'manmade',
        'static.vegetation': 'vegetation',
        'static.other': 'static.other',
        'vehicle.ego': 'ego'
    }
    return string_mapper[class_name]

def render_histogram(nusc: NuScenes, sort_by: str = 'count_desc', verbose: bool = True,
                     font_size: int = 20, save_as_img_name: str = None) -> None:
    """
    Render two histograms for the given nuScenes split. The top histogram depicts the number of scan-wise instances
    for each class, while the bottom histogram depicts the number of points for each class.
    :param nusc: A nuScenes object.
    :param sort_by: How to sort the classes to display in the plot (note that the x-axis, where the class names
        will be displayed on, is shared by the two histograms):
        - count_desc: Sort the classes by the number of points belonging to each class, in descending order.
        - count_asc: Sort the classes by the number of points belonging to each class, in ascending order.
        - name: Sort the classes by alphabetical order.
        - index: Sort the classes by their indices.
    :param verbose: Whether to display the plot in a window after rendering.
    :param font_size: Size of the font to use for the plot.
    :param save_as_img_name: Path (including image name and extension) to save the plot as.
    """
    # Get the statistics for the given nuScenes split.
    lidarseg_num_points_per_class = get_lidarseg_num_points_per_class(nusc, sort_by=sort_by)
    panoptic_num_instances_per_class = get_panoptic_num_instances_per_class(nusc, sort_by=sort_by)

    # Align the panoptic classes to the lidarseg ones, filling in zero for missing classes.
    panoptic_num_instances_per_class_tmp = dict()
    for class_name in lidarseg_num_points_per_class.keys():
        num_instances_for_class = panoptic_num_instances_per_class.get(class_name, 0)
        panoptic_num_instances_per_class_tmp[class_name] = num_instances_for_class
    panoptic_num_instances_per_class = panoptic_num_instances_per_class_tmp

    histograms_config = dict({
        'panoptic': {
            'y_values': list(panoptic_num_instances_per_class.values()),
            'y_label': 'No. of instances',
            'y_scale': 'log'
        },
        'lidarseg': {
            'y_values': list(lidarseg_num_points_per_class.values()),
            'y_label': 'No. of lidar points',
            'y_scale': 'log'
        }
    })
    assert lidarseg_num_points_per_class.keys() == panoptic_num_instances_per_class.keys(), \
        'Error: There are {} classes for lidarseg, but {} classes for panoptic.'.format(
            len(lidarseg_num_points_per_class.keys()), len(panoptic_num_instances_per_class.keys()))
    class_names = list(lidarseg_num_points_per_class.keys())

    # Assign a color to each class and truncate the class names for display.
    cmap = get_colormap()
    colors = ['#%02x%02x%02x' % tuple(cmap[cn]) for cn in class_names]
    class_names = [truncate_class_name(cn) for cn in class_names]

    fig, axes = plt.subplots(nrows=2, sharex=True, figsize=(16, 9))
    for ax in axes:
        ax.margins(x=0.005)
        ax.set_axisbelow(True)
        ax.yaxis.grid(color='white', linewidth=2)
        ax.set_facecolor('#eaeaf2')
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
        ax.spines['left'].set_visible(False)

    for i, (histogram, config) in enumerate(histograms_config.items()):
        # Plot the histogram.
        axes[i].bar(class_names, config['y_values'], color=colors)
        assert len(class_names) == len(axes[i].get_xticks()), \
            'There are {} classes, but {} are shown on the x-axis'.format(
                len(class_names), len(axes[i].get_xticks()))

        # Format the x-axis.
        axes[i].set_xticklabels(class_names, rotation=45, horizontalalignment='right',
                                fontweight='light', fontsize=font_size)
        trans = mtrans.Affine2D().translate(10, 0)
        for t in axes[i].get_xticklabels():
            t.set_transform(t.get_transform() + trans)

        # Format the y-axis.
        axes[i].set_ylabel(config['y_label'], fontsize=font_size)
        axes[i].set_yticklabels(config['y_values'], size=font_size)
        axes[i].set_yscale(config['y_scale'])
        if config['y_scale'] == 'linear':
            axes[i].yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1e'))

    if save_as_img_name:
        plt.tight_layout()
        fig.savefig(save_as_img_name)
    if verbose:
        plt.show()

def get_lidarseg_num_points_per_class(nusc: NuScenes, sort_by: str = 'count_desc') -> Dict[str, int]:
    """
    Get the number of points belonging to each class for the given nuScenes split.
    :param nusc: A NuScenes object.
    :param sort_by: How to sort the classes:
        - count_desc: Sort the classes by the number of points belonging to each class, in descending order.
        - count_asc: Sort the classes by the number of points belonging to each class, in ascending order.
        - name: Sort the classes by alphabetical order.
        - index: Sort the classes by their indices.
    :return: A dictionary whose keys are the class names and values are the corresponding number of points for
        each class.
    """
    # Accumulate the per-class point counts over all lidarseg scans.
    lidarseg_counts = [0] * len(nusc.lidarseg_idx2name_mapping)
    for record_lidarseg in nusc.lidarseg:
        lidarseg_labels_filename = os.path.join(nusc.dataroot, record_lidarseg['filename'])
        points_label = np.fromfile(lidarseg_labels_filename, dtype=np.uint8)
        indices = np.bincount(points_label)
        ii = np.nonzero(indices)[0]
        for class_idx, class_count in zip(ii, indices[ii]):
            lidarseg_counts[class_idx] += class_count

    num_points_per_class = dict()
    for i in range(len(lidarseg_counts)):
        num_points_per_class[nusc.lidarseg_idx2name_mapping[i]] = lidarseg_counts[i]

    if sort_by == 'count_desc':
        num_points_per_class = dict(sorted(num_points_per_class.items(), key=lambda item: item[1], reverse=True))
    elif sort_by == 'count_asc':
        num_points_per_class = dict(sorted(num_points_per_class.items(), key=lambda item: item[1]))
    elif sort_by == 'name':
        num_points_per_class = dict(sorted(num_points_per_class.items()))
    elif sort_by == 'index':
        num_points_per_class = dict(num_points_per_class.items())
    else:
        raise Exception('Error: Invalid sorting mode {}. '
                        'Only `count_desc`, `count_asc`, `name` or `index` are valid.'.format(sort_by))

    return num_points_per_class

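# The per-class counting above relies on a common numpy pattern; a toy sketch with
# invented labels: bincount gives per-index counts and nonzero selects the classes
# that actually occur.
def _demo_bincount_pattern() -> None:
    points_label = np.array([0, 2, 2, 5], dtype=np.uint8)
    indices = np.bincount(points_label)   # [1, 0, 2, 0, 0, 1]
    ii = np.nonzero(indices)[0]           # [0, 2, 5]
    for class_idx, class_count in zip(ii, indices[ii]):
        print(class_idx, class_count)     # (0, 1), (2, 2), (5, 1)
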
def get_panoptic_num_instances_per_class(nusc: NuScenes, sort_by: str = 'count_desc') -> Dict[str, int]:
    """
    Get the number of scan-wise instances belonging to each class for the given nuScenes split.
    :param nusc: A NuScenes object.
    :param sort_by: How to sort the classes:
        - count_desc: Sort the classes by the number of instances belonging to each class, in descending order.
        - count_asc: Sort the classes by the number of instances belonging to each class, in ascending order.
        - name: Sort the classes by alphabetical order.
        - index: Sort the classes by their indices.
    :return: A dictionary whose keys are the class names and values are the corresponding number of scan-wise
        instances for each class.
    """
    sequence_wise_instances_per_class = dict()
    for instance in nusc.instance:
        instance_class = nusc.get('category', instance['category_token'])['name']
        if instance_class not in sequence_wise_instances_per_class.keys():
            sequence_wise_instances_per_class[instance_class] = 0
        sequence_wise_instances_per_class[instance_class] += instance['nbr_annotations']

    if sort_by == 'count_desc':
        sequence_wise_instances_per_class = dict(
            sorted(sequence_wise_instances_per_class.items(), key=lambda item: item[1], reverse=True))
    elif sort_by == 'count_asc':
        sequence_wise_instances_per_class = dict(
            sorted(sequence_wise_instances_per_class.items(), key=lambda item: item[1]))
    elif sort_by == 'name':
        sequence_wise_instances_per_class = dict(sorted(sequence_wise_instances_per_class.items()))
    elif sort_by == 'index':
        sequence_wise_instances_per_class = dict(sequence_wise_instances_per_class.items())
    else:
        raise Exception('Error: Invalid sorting mode {}. '
                        'Only `count_desc`, `count_asc`, `name` or `index` are valid.'.format(sort_by))

    return sequence_wise_instances_per_class

class BitMap:

    def __init__(self, dataroot: str, map_name: str, layer_name: str):
        """
        This class is used to render bitmap map layers. Currently these are:
        - semantic_prior: The semantic prior (driveable surface and sidewalks) mask from nuScenes 1.0.
        - basemap: The HD lidar basemap used for localization and as general context.

        :param dataroot: Path of the nuScenes dataset.
        :param map_name: Which map out of `singapore-onenorth`, `singapore-hollandvillage`,
            `singapore-queenstown` and `boston-seaport`.
        :param layer_name: The type of bitmap map, `semantic_prior` or `basemap`.
        """
        self.dataroot = dataroot
        self.map_name = map_name
        self.layer_name = layer_name
        self.image = self.load_bitmap()

    def load_bitmap(self) -> np.ndarray:
        """
        Load the specified bitmap.
        """
        # Load bitmap.
        if self.layer_name == 'basemap':
            map_path = os.path.join(self.dataroot, 'maps', 'basemap', self.map_name + '.png')
        elif self.layer_name == 'semantic_prior':
            map_hashes = {
                'singapore-onenorth': '53992ee3023e5494b90c316c183be829',
                'singapore-hollandvillage': '37819e65e09e5547b8a3ceaefba56bb2',
                'singapore-queenstown': '93406b464a165eaba6d9de76ca09f5da',
                'boston-seaport': '36092f0b03a857c6a3403e25b4b7aab3'
            }
            map_hash = map_hashes[self.map_name]
            map_path = os.path.join(self.dataroot, 'maps', map_hash + '.png')
        else:
            raise Exception('Error: Invalid bitmap layer: %s' % self.layer_name)

        # Convert to numpy.
        if os.path.exists(map_path):
            image = np.array(Image.open(map_path))
        else:
            raise Exception('Error: Cannot find %s %s! Please make sure that the map is correctly installed.'
                            % (self.layer_name, map_path))

        # Invert the semantic prior colors.
        if self.layer_name == 'semantic_prior':
            image = image.max() - image

        return image

    def render(self, canvas_edge: Tuple[float, float], ax: Axis = None):
        """
        Render the bitmap.
        Note: Regardless of the image dimensions, the image will be rendered to occupy the entire map.
        :param canvas_edge: The dimension of the current map in meters (width, height).
        :param ax: Optional axis to render to.
        """
        if ax is None:
            ax = plt.subplot()
        x, y = canvas_edge
        if len(self.image.shape) == 2:
            ax.imshow(self.image, extent=[0, x, 0, y], cmap='gray')
        else:
            ax.imshow(self.image, extent=[0, x, 0, y])

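# A hypothetical usage sketch (path and canvas size invented): load and render
# the basemap for one map location.
def _demo_bitmap(dataroot: str = '/data/sets/nuscenes') -> None:
    bitmap = BitMap(dataroot, 'singapore-onenorth', 'basemap')
    # Canvas edge in meters; the image is stretched to cover the whole map.
    bitmap.render(canvas_edge=(1500.0, 2000.0))
    plt.show()
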
class TestAllMaps(unittest.TestCase):
    version = 'v1.0-mini'
    render = False

    def setUp(self):
        """ Initialize the map for each location. """
        self.nusc_maps = dict()
        for map_name in locations:
            # Load map.
            nusc_map = NuScenesMap(map_name=map_name, dataroot=os.environ['NUSCENES'])

            # Render for debugging.
            if self.render:
                nusc_map.render_layers(['lane'], figsize=1)
                plt.show()

            self.nusc_maps[map_name] = nusc_map

    def test_layer_stats(self):
        """ Test if each layer has the right number of instances. This is useful to compare between map versions. """
        layer_counts = defaultdict(lambda: [])
        ref_counts = {
            'singapore-onenorth': [1, 783, 645, 936, 120, 838, 451, 39, 152, 357, 127],
            'singapore-hollandvillage': [426, 167, 387, 601, 28, 498, 300, 0, 107, 220, 119],
            'singapore-queenstown': [219, 260, 676, 910, 75, 457, 437, 40, 172, 257, 81],
            'boston-seaport': [2, 928, 969, 1215, 340, 301, 775, 275, 377, 671, 307]
        }

        for map_name in locations:
            nusc_map = self.nusc_maps[map_name]
            for layer_name in nusc_map.non_geometric_layers:
                layer_objs = nusc_map.json_obj[layer_name]
                layer_counts[map_name].append(len(layer_objs))

            assert ref_counts[map_name] == layer_counts[map_name], \
                'Error: Map %s has a different number of layers: \n%s vs. \n%s' \
                % (map_name, ref_counts[map_name], layer_counts[map_name])

    @unittest.skip
    def test_disconnected_lanes(self):
        """ Check if any lanes are disconnected. """
        found_error = False
        for map_name in locations:
            nusc_map = self.nusc_maps[map_name]
            disconnected = get_disconnected_lanes(nusc_map)
            if len(disconnected) > 0:
                print('Error: Missing connectivity in map %s for %d lanes: \n%s'
                      % (map_name, len(disconnected), disconnected))
                found_error = True
        self.assertFalse(found_error, 'Error: Found missing connectivity. See messages above!')

    def test_egoposes_on_map(self):
        """ Test that all ego poses land on the drivable area. """
        nusc = NuScenes(version=self.version, dataroot=os.environ['NUSCENES'], verbose=False)
        whitelist = ['scene-0499', 'scene-0501', 'scene-0502', 'scene-0515', 'scene-0517']

        invalid_scenes = []
        for scene in tqdm.tqdm(nusc.scene, leave=False):
            if scene['name'] in whitelist:
                continue
            log = nusc.get('log', scene['log_token'])
            map_name = log['location']
            nusc_map = self.nusc_maps[map_name]
            ratio_valid = get_egoposes_on_drivable_ratio(nusc, nusc_map, scene['token'])
            if ratio_valid != 1.0:
                print('Error: Scene %s has a ratio of %f ego poses on the driveable area!'
                      % (scene['name'], ratio_valid))
                invalid_scenes.append(scene['name'])
        self.assertEqual(len(invalid_scenes), 0)

class TestUtils(unittest.TestCase):

    def setUp(self) -> None:
        self.straight_path = {'start_pose': [421.2419602954602, 1087.9127960414617, 2.739593514975998],
                              'end_pose': [391.7142849867393, 1100.464077182952, 2.7365754617298705],
                              'shape': 'LSR',
                              'radius': 999.999,
                              'segment_length': [0.23651121617864976, 28.593481378991886, 3.254561444252876]}
        self.left_path = {'start_pose': [391.7142849867393, 1100.464077182952, 2.7365754617298705],
                          'end_pose': [372.7733659833846, 1093.0160135871615, -2.000208580915862],
                          'shape': 'LSL',
                          'radius': 14.473414516079979,
                          'segment_length': [22.380622583127813, 0.18854612175175053, 0.0010839266609007578]}
        self.right_path = {'start_pose': [367.53376358458553, 1097.5300417399676, 1.1738120532326812],
                           'end_pose': [392.24904359636037, 1112.5206834496375, -0.4033046016493182],
                           'shape': 'RSR',
                           'radius': 16.890467008945414,
                           'segment_length': [4.423187697943063e-05, 6.490596454713637, 26.63819259666578]}
        self.straight_lane = [self.straight_path]
        self.curved_lane = [self.straight_path, self.left_path]
        self.right_lane = [self.right_path]

def test_discretize_straight_path(self):
discrete_path = arcline_path_utils.discretize(self.straight_path, 10)
answer = np.array([(421.2419602954602, 1087.9127960414617, 2.739593514975998), (413.85953060356087, 1091.049417600379, 2.739830026428688), (406.4770899726762, 1094.1860134184205, 2.739830026428688), (399.0946493417915, 1097.322609236462, 2.739830026428688), (391.71428498673856, 1100.4640771829522, 2.7365754617298705)])
np.testing.assert_allclose(answer, discrete_path)
def test_discretize_curved_path(self):
discrete_path = arcline_path_utils.discretize(self.left_path, 2)
answer = np.array([(391.7142849867393, 1100.464077182952, 2.7365754617298705), (389.94237388555354, 1101.0909492468568, 2.8665278225823894), (388.10416900705434, 1101.4829190922167, 2.996480183434908), (386.23066958739906, 1101.633376593063, 3.126432544287426), (384.3534700650694, 1101.539784454639, (- 3.026800402039642)), (382.50422727657343, 1101.2037210019917, (- 2.8968480411871234)), (380.714126599876, 1100.630853563314, (- 2.7668956803346045)), (379.01335604844144, 1099.830842896896, (- 2.6369433194820857)), (377.4305971846951, 1098.8171802734153, (- 2.506990958629568)), (375.99254143806974, 1097.6069599609898, (- 2.377038597777049)), (374.7234399843828, 1096.220590949774, (- 2.24708623692453)), (373.64469477731785, 1094.6814527775348, (- 2.117133876072012)), (372.7733659833847, 1093.0160135871613, (- 2.0002085809158623))])
np.testing.assert_allclose(answer, discrete_path)
def test_discretize_curved_lane(self):
discrete_path = arcline_path_utils.discretize_lane(self.curved_lane, 5)
answer = np.array([(421.2419602954602, 1087.9127960414617, 2.739593514975998), (417.0234337310829, 1089.7051622497897, 2.739830026428688), (412.80489622772023, 1091.497502717242, 2.739830026428688), (408.5863587243576, 1093.2898431846943, 2.739830026428688), (404.3678212209949, 1095.0821836521468, 2.739830026428688), (400.1492837176322, 1096.874524119599, 2.739830026428688), (395.93074621426956, 1098.6668645870514, 2.739830026428688), (391.71428498673856, 1100.4640771829522, 2.7365754617298705), (391.7142849867393, 1100.464077182952, 2.7365754617298705), (387.35724292592613, 1101.5723176767192, 3.048461127775915), (382.87033132963325, 1101.2901176788932, (- 2.922838513357627)), (378.6864775951582, 1099.6447057425564, (- 2.610952847311582)), (375.20936805976606, 1096.7948422737907, (- 2.2990671812655377)), (372.7733659833847, 1093.0160135871613, (- 2.0002085809158623))])
np.testing.assert_allclose(answer, discrete_path)

    def test_length_of_lane(self):
        self.assertEqual(arcline_path_utils.length_of_lane(self.straight_lane),
                         sum(self.straight_path['segment_length']))
        self.assertEqual(arcline_path_utils.length_of_lane(self.right_lane),
                         sum(self.right_path['segment_length']))
        self.assertEqual(arcline_path_utils.length_of_lane(self.curved_lane),
                         sum(self.straight_path['segment_length']) + sum(self.left_path['segment_length']))

    def test_project_pose_to_straight_lane(self):
        theta = 2.739593514975998
        end_pose = (421.2419602954602 + 10 * math.cos(theta), 1087.9127960414617 + 10 * math.sin(theta), theta)
        pose, s = arcline_path_utils.project_pose_to_lane(end_pose, self.straight_lane)
        np.testing.assert_allclose(np.array(pose).astype('int'), np.array(end_pose).astype('int'))
        self.assertTrue(abs(s - 10) <= 0.5)

    def test_project_pose_not_close_to_lane(self):
        pose = (362, 1092, 1.15)
        pose_on_lane, s = arcline_path_utils.project_pose_to_lane(pose, self.right_lane)
        self.assertListEqual(list(pose_on_lane), self.right_path['start_pose'])
        self.assertEqual(s, 0)

    def test_project_pose_to_curved_lane(self):
        theta = 2.739593514975998
        end_pose_1 = (421.2419602954602 + 10 * math.cos(theta), 1087.9127960414617 + 10 * math.sin(theta), theta)
        end_pose_2 = (381, 1100, -2.76)
        pose, s = arcline_path_utils.project_pose_to_lane(end_pose_1, self.curved_lane)
        np.testing.assert_allclose(np.array(pose).astype('int'), np.array(end_pose_1).astype('int'))
        self.assertTrue(abs(s - 10) <= 0.5)
        pose_2, s_2 = arcline_path_utils.project_pose_to_lane(end_pose_2, self.curved_lane)
        np.testing.assert_allclose(np.array(pose_2[:2]).astype('int'), np.array([380, 1100]))
        self.assertTrue(abs(s_2 - 44) <= 0.5)

    def test_get_curvature_straight_lane(self):
        curvature = arcline_path_utils.get_curvature_at_distance_along_lane(15, self.straight_lane)
        self.assertEqual(curvature, 0)

    def test_curvature_curved_lane(self):
        curvature = arcline_path_utils.get_curvature_at_distance_along_lane(53, self.curved_lane)
        self.assertEqual(curvature, 1 / self.left_path['radius'])

def get_egoposes_on_drivable_ratio(nusc: NuScenes, nusc_map: NuScenesMap, scene_token: str) -> float:
    """
    Get the ratio of ego poses on the drivable area.
    :param nusc: A NuScenes instance.
    :param nusc_map: The NuScenesMap instance of a particular map location.
    :param scene_token: The token of the current scene.
    :return: The ratio of poses that fall on the driveable area.
    """
    # Go through each sample in the scene.
    sample_tokens = nusc.field2token('sample', 'scene_token', scene_token)
    poses_all = 0
    poses_valid = 0
    for sample_token in sample_tokens:
        # Get the ego pose at the timestamp of the lidar keyframe.
        sample_record = nusc.get('sample', sample_token)
        sample_data_record = nusc.get('sample_data', sample_record['data']['LIDAR_TOP'])
        pose_record = nusc.get('ego_pose', sample_data_record['ego_pose_token'])
        ego_pose = pose_record['translation'][:2]

        # Check whether the ego pose is on the driveable area.
        record = nusc_map.record_on_point(ego_pose[0], ego_pose[1], 'drivable_area')
        if len(record) > 0:
            poses_valid += 1
        poses_all += 1
    ratio_valid = poses_valid / poses_all

    return ratio_valid

def get_disconnected_subtrees(connectivity: Dict[str, dict]) -> Set[str]:
    """
    Compute lanes or lane_connectors that are part of disconnected subtrees.
    :param connectivity: The connectivity of the current NuScenesMap.
    :return: The lane_tokens for lanes that are part of a disconnected subtree.
    """
    # Flood fill the connectivity graph, starting from an arbitrary first key.
    connected = set()
    pending = set()
    all_keys = list(connectivity.keys())
    first_key = all_keys[0]
    all_keys = set(all_keys)
    pending.add(first_key)

    while len(pending) > 0:
        # Pop the next lane and mark it as connected.
        lane_token = pending.pop()
        connected.add(lane_token)

        # Add all of its unvisited neighbors (incoming and outgoing) to the queue.
        if lane_token in connectivity:
            incoming = connectivity[lane_token]['incoming']
            outgoing = connectivity[lane_token]['outgoing']
            inout_lanes = set(incoming + outgoing)
            for other_lane_token in inout_lanes:
                if other_lane_token not in connected:
                    pending.add(other_lane_token)

    # Whatever was not reached from the first key is disconnected.
    disconnected = all_keys - connected
    assert len(disconnected) < len(connected), 'Error: Bad initialization chosen!'
    return disconnected

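# A toy example (invented tokens) of the flood fill above: starting from the first
# key, 'a' and 'b' are reachable, while 'c' forms a disconnected subtree.
def _demo_disconnected_subtrees() -> None:
    connectivity = {
        'a': {'incoming': [], 'outgoing': ['b']},
        'b': {'incoming': ['a'], 'outgoing': []},
        'c': {'incoming': [], 'outgoing': []}
    }
    assert get_disconnected_subtrees(connectivity) == {'c'}
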
def drop_disconnected_lanes(nusc_map: NuScenesMap) -> NuScenesMap:
    """
    Remove any disconnected lanes.
    Note: This function is currently not used and we do not recommend using it. Some lanes that we do not drive on
    are disconnected from the other lanes. Removing them would create a single connected graph. It also removes
    meaningful information, which is why we do not drop these.
    :param nusc_map: The NuScenesMap instance of a particular map location.
    :return: The cleaned NuScenesMap instance.
    """
    # Get disconnected lanes.
    disconnected = get_disconnected_lanes(nusc_map)

    # Remove lanes.
    nusc_map.lane = [lane for lane in nusc_map.lane if lane['token'] not in disconnected]

    # Remove lane_connectors.
    nusc_map.lane_connector = [lane for lane in nusc_map.lane_connector if lane['token'] not in disconnected]

    # Remove connectivity entries.
    for lane_token in disconnected:
        if lane_token in nusc_map.connectivity:
            del nusc_map.connectivity[lane_token]

    # Remove arcline_path_3 entries.
    for lane_token in disconnected:
        if lane_token in nusc_map.arcline_path_3:
            del nusc_map.arcline_path_3[lane_token]

    # Remove connectivity references to dropped lanes and prune entries that become empty.
    empty_connectivity = []
    for lane_token, connectivity in nusc_map.connectivity.items():
        connectivity['incoming'] = [i for i in connectivity['incoming'] if i not in disconnected]
        connectivity['outgoing'] = [o for o in connectivity['outgoing'] if o not in disconnected]
        if len(connectivity['incoming']) + len(connectivity['outgoing']) == 0:
            empty_connectivity.append(lane_token)
    for lane_token in empty_connectivity:
        del nusc_map.connectivity[lane_token]

    # To fix the map class, we need to update some indices.
    nusc_map._make_token2ind()

    return nusc_map

def get_disconnected_lanes(nusc_map: NuScenesMap) -> List[str]:
    """
    Get a list of all disconnected lanes and lane_connectors.
    :param nusc_map: The NuScenesMap instance of a particular map location.
    :return: A list of lane or lane_connector tokens.
    """
    disconnected = set()
    for lane_token, connectivity in nusc_map.connectivity.items():
        # Lanes without any connectivity.
        inout_lanes = connectivity['incoming'] + connectivity['outgoing']
        if len(inout_lanes) == 0:
            disconnected.add(lane_token)
            continue

        # Referenced lanes that exist neither as lane nor as lane_connector records.
        for inout_lane_token in inout_lanes:
            if inout_lane_token not in nusc_map._token2ind['lane'] and \
                    inout_lane_token not in nusc_map._token2ind['lane_connector']:
                disconnected.add(inout_lane_token)

    # Lanes that are part of disconnected subtrees.
    subtrees = get_disconnected_subtrees(nusc_map.connectivity)
    disconnected = disconnected.union(subtrees)

    return sorted(list(disconnected))

def pixels_to_box_corners(row_pixel: int, column_pixel: int, length_in_pixels: float,
                          width_in_pixels: float, yaw_in_radians: float) -> np.ndarray:
    """
    Computes four corners of 2d bounding box for agent.
    The coordinates of the box are in pixels.
    :param row_pixel: Row pixel of the agent.
    :param column_pixel: Column pixel of the agent.
    :param length_in_pixels: Length of the agent.
    :param width_in_pixels: Width of the agent.
    :param yaw_in_radians: Yaw of the agent (global coordinates).
    :return: numpy array representing the four corners of the agent.
    """
    # cv2.boxPoints expects ((center), (size), angle in degrees); note the flipped sign of the yaw.
    coord_tuple = ((column_pixel, row_pixel), (length_in_pixels, width_in_pixels),
                   -yaw_in_radians * 180 / np.pi)
    box = cv2.boxPoints(coord_tuple)
    return box

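# A small sanity sketch (invented numbers): for zero yaw, the four returned corners
# form the axis-aligned box around (column_pixel, row_pixel).
def _demo_pixels_to_box_corners() -> None:
    corners = pixels_to_box_corners(row_pixel=100, column_pixel=200,
                                    length_in_pixels=40.0, width_in_pixels=20.0,
                                    yaw_in_radians=0.0)
    print(corners.shape)  # (4, 2), in (column, row) pixel coordinates.
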
def get_track_box(annotation: Dict[str, Any], center_coordinates: Tuple[float, float],
                  center_pixels: Tuple[float, float], resolution: float = 0.1) -> np.ndarray:
    """
    Get four corners of bounding box for agent in pixels.
    :param annotation: The annotation record of the agent.
    :param center_coordinates: (x, y) coordinates in global frame of the center of the image.
    :param center_pixels: (row_index, column_index) location of the center of the image in pixel coordinates.
    :param resolution: Resolution pixels/meter of the image.
    """
    assert resolution > 0
    location = annotation['translation'][:2]
    yaw_in_radians = quaternion_yaw(Quaternion(annotation['rotation']))
    row_pixel, column_pixel = convert_to_pixel_coords(location, center_coordinates, center_pixels, resolution)
    width = annotation['size'][0] / resolution
    length = annotation['size'][1] / resolution
    return pixels_to_box_corners(row_pixel, column_pixel, length, width, yaw_in_radians)

def reverse_history(history: History) -> History:
    """
    Reverse history so that most distant observations are first.
    We do this because we want to draw more recent bounding boxes on top of older ones.
    :param history: Result of get_past_for_sample PredictHelper method.
    :return: History with the values reversed.
    """
    return {token: anns[::-1] for token, anns in history.items()}

def add_present_time_to_history(current_time: List[Dict[str, Any]], history: History) -> History:
    """
    Adds the sample annotation records from the current time to the history object.
    :param current_time: List of sample annotation records from the current time.
        Result of get_annotations_for_sample method of PredictHelper.
    :param history: Result of get_past_for_sample method of PredictHelper.
    :return: History with values from current_time appended.
    """
    for annotation in current_time:
        token = annotation['instance_token']
        if token in history:
            # Append the present annotation after the (already reversed) past ones.
            history[token].append(annotation)
        else:
            history[token] = [annotation]
    return history

def fade_color(color: Tuple[int, int, int], step: int,
               total_number_of_steps: int) -> Tuple[int, int, int]:
    """
    Fades a color so that past observations are darker in the image.
    :param color: Tuple of ints describing an RGB color.
    :param step: The current time step.
    :param total_number_of_steps: The total number of time steps the agent has in the image.
    :return: Tuple representing faded rgb color.
    """
    LOWEST_VALUE = 0.4

    if step == total_number_of_steps:
        return color

    # Interpolate the value (brightness) channel between LOWEST_VALUE and the original brightness.
    hsv_color = colorsys.rgb_to_hsv(*color)
    increment = (float(hsv_color[2]) / 255.0 - LOWEST_VALUE) / total_number_of_steps
    new_value = LOWEST_VALUE + step * increment
    new_rgb = colorsys.hsv_to_rgb(float(hsv_color[0]), float(hsv_color[1]), new_value * 255.0)
    return new_rgb

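# A worked example (invented values): fading yellow over 5 steps. The value channel
# is interpolated from LOWEST_VALUE up to the original brightness, so step 0 is darkest.
def _demo_fade_color() -> None:
    for step in range(5):
        print(step, fade_color((255, 255, 0), step, total_number_of_steps=4))
    # Step 4 returns the original (255, 255, 0); earlier steps are progressively darker.
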
def default_colors(category_name: str) -> Tuple[int, int, int]:
    """
    Maps a category name to an rgb color (without fading).
    :param category_name: Name of object category for the annotation.
    :return: Tuple representing rgb color.
    """
    if 'vehicle' in category_name:
        return 255, 255, 0  # yellow
    elif 'object' in category_name:
        return 204, 0, 204  # violet
    elif 'human' in category_name or 'animal' in category_name:
        return 255, 153, 51  # orange
    else:
        raise ValueError(f'Cannot map {category_name} to a color.')

def draw_agent_boxes(center_agent_annotation: Dict[str, Any], center_agent_pixels: Tuple[float, float],
                     agent_history: History, base_image: np.ndarray,
                     get_color: Callable[[str], Tuple[int, int, int]], resolution: float = 0.1) -> None:
    """
    Draws past sequence of agent boxes on the image.
    :param center_agent_annotation: Annotation record for the agent that is in the center of the image.
    :param center_agent_pixels: Pixel location of the agent in the center of the image.
    :param agent_history: History for all agents in the scene.
    :param base_image: Image to draw the agents in.
    :param get_color: Mapping from category_name to RGB tuple.
    :param resolution: Size of the image in pixels / meter.
    :return: None.
    """
    agent_x, agent_y = center_agent_annotation['translation'][:2]

    for instance_token, annotations in agent_history.items():
        num_points = len(annotations)
        for i, annotation in enumerate(annotations):
            box = get_track_box(annotation, (agent_x, agent_y), center_agent_pixels, resolution)

            # The center agent is drawn in red; all others use the category color.
            if instance_token == center_agent_annotation['instance_token']:
                color = (255, 0, 0)
            else:
                color = get_color(annotation['category_name'])

            # Don't fade the most recent box of each track.
            if num_points > 1:
                color = fade_color(color, i, num_points - 1)

            cv2.fillPoly(base_image, pts=[np.int0(box)], color=color)

class AgentBoxesWithFadedHistory(AgentRepresentation):
'\n Represents the past sequence of agent states as a three-channel\n image with faded 2d boxes.\n '
def __init__(self, helper: PredictHelper, seconds_of_history: float=2, frequency_in_hz: float=2, resolution: float=0.1, meters_ahead: float=40, meters_behind: float=10, meters_left: float=25, meters_right: float=25, color_mapping: Callable[([str], Tuple[(int, int, int)])]=None):
self.helper = helper
self.seconds_of_history = seconds_of_history
self.frequency_in_hz = frequency_in_hz
if (not (resolution > 0)):
raise ValueError(f'Resolution must be positive. Received {resolution}.')
self.resolution = resolution
self.meters_ahead = meters_ahead
self.meters_behind = meters_behind
self.meters_left = meters_left
self.meters_right = meters_right
if (not color_mapping):
color_mapping = default_colors
self.color_mapping = color_mapping
def make_representation(self, instance_token: str, sample_token: str) -> np.ndarray:
'\n Draws agent boxes with faded history into a black background.\n :param instance_token: Instance token.\n :param sample_token: Sample token.\n :return: np.ndarray representing a 3 channel image.\n '
buffer = (max([self.meters_ahead, self.meters_behind, self.meters_left, self.meters_right]) * 2)
image_side_length = int((buffer / self.resolution))
central_track_pixels = ((image_side_length / 2), (image_side_length / 2))
base_image = np.zeros((image_side_length, image_side_length, 3))
history = self.helper.get_past_for_sample(sample_token, self.seconds_of_history, in_agent_frame=False, just_xy=False)
history = reverse_history(history)
present_time = self.helper.get_annotations_for_sample(sample_token)
history = add_present_time_to_history(present_time, history)
center_agent_annotation = self.helper.get_sample_annotation(instance_token, sample_token)
draw_agent_boxes(center_agent_annotation, central_track_pixels, history, base_image, resolution=self.resolution, get_color=self.color_mapping)
center_agent_yaw = quaternion_yaw(Quaternion(center_agent_annotation['rotation']))
rotation_mat = get_rotation_matrix(base_image.shape, center_agent_yaw)
rotated_image = cv2.warpAffine(base_image, rotation_mat, (base_image.shape[1], base_image.shape[0]))
(row_crop, col_crop) = get_crops(self.meters_ahead, self.meters_behind, self.meters_left, self.meters_right, self.resolution, image_side_length)
return rotated_image[(row_crop, col_crop)].astype('uint8')
|
def add_foreground_to_image(base_image: np.ndarray, foreground_image: np.ndarray) -> np.ndarray:
    '\n    Overlays a foreground image on top of a base image without mixing colors.\n    :param base_image: Image that will be the background. Type uint8.\n    :param foreground_image: Image that will be the foreground. Type uint8.\n    :return: Image Numpy array of type uint8.\n    '
if (not (base_image.shape == foreground_image.shape)):
raise ValueError('base_image and foreground image must have the same shape. Received {} and {}'.format(base_image.shape, foreground_image.shape))
if (not ((base_image.dtype == 'uint8') and (foreground_image.dtype == 'uint8'))):
raise ValueError("base_image and foreground image must be of type 'uint8'. Received {} and {}".format(base_image.dtype, foreground_image.dtype))
img2gray = cv2.cvtColor(foreground_image, cv2.COLOR_BGR2GRAY)
(_, mask) = cv2.threshold(img2gray, 0, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
img1_bg = cv2.bitwise_and(base_image, base_image, mask=mask_inv)
img2_fg = cv2.bitwise_and(foreground_image, foreground_image, mask=mask)
combined_image = cv2.add(img1_bg, img2_fg)
return combined_image
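
def _example_add_foreground() -> None:
    # Minimal sketch (assumes numpy and cv2 are in scope, as this module uses them):
    # non-black foreground pixels replace background pixels outright; there is no
    # alpha blending.
    base = np.full((4, 4, 3), 100, dtype=np.uint8)
    foreground = np.zeros((4, 4, 3), dtype=np.uint8)
    foreground[0, 0] = (0, 0, 255)
    combined = add_foreground_to_image(base, foreground)
    assert tuple(combined[0, 0]) == (0, 0, 255)      # foreground wins where it is non-black
    assert tuple(combined[1, 1]) == (100, 100, 100)  # background preserved elsewhere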
|
class Rasterizer(Combinator):
'\n Combines images into a three channel image.\n '
def combine(self, data: List[np.ndarray]) -> np.ndarray:
"\n Combine three channel images into a single image.\n :param data: List of images to combine.\n :return: Numpy array representing image (type 'uint8')\n "
image_shape = data[0].shape
base_image = np.zeros(image_shape).astype('uint8')
return reduce(add_foreground_to_image, ([base_image] + data))
|
class StaticLayerRepresentation(abc.ABC):
' Represents static map information as a numpy array. '
@abc.abstractmethod
def make_representation(self, instance_token: str, sample_token: str) -> np.ndarray:
raise NotImplementedError()
|
class AgentRepresentation(abc.ABC):
' Represents information of agents in scene as numpy array. '
@abc.abstractmethod
def make_representation(self, instance_token: str, sample_token: str) -> np.ndarray:
raise NotImplementedError()
|
class Combinator(abc.ABC):
' Combines the StaticLayer and Agent representations into a single one. '
@abc.abstractmethod
def combine(self, data: List[np.ndarray]) -> np.ndarray:
raise NotImplementedError()
|
class InputRepresentation():
'\n Specifies how to represent the input for a prediction model.\n Need to provide a StaticLayerRepresentation - how the map is represented,\n an AgentRepresentation - how agents in the scene are represented,\n and a Combinator, how the StaticLayerRepresentation and AgentRepresentation should be combined.\n '
def __init__(self, static_layer: StaticLayerRepresentation, agent: AgentRepresentation, combinator: Combinator):
self.static_layer_rasterizer = static_layer
self.agent_rasterizer = agent
self.combinator = combinator
def make_input_representation(self, instance_token: str, sample_token: str) -> np.ndarray:
static_layers = self.static_layer_rasterizer.make_representation(instance_token, sample_token)
agents = self.agent_rasterizer.make_representation(instance_token, sample_token)
return self.combinator.combine([static_layers, agents])
|
class TestRasterizer(unittest.TestCase):
def test(self):
layer_1 = np.zeros((100, 100, 3))
box_1 = cv2.boxPoints(((50, 50), (20, 20), 0))
layer_1 = cv2.fillPoly(layer_1, pts=[np.int0(box_1)], color=(255, 255, 255))
layer_2 = np.zeros((100, 100, 3))
box_2 = cv2.boxPoints(((70, 30), (10, 10), 0))
layer_2 = cv2.fillPoly(layer_2, pts=[np.int0(box_2)], color=(0, 0, 255))
rasterizer = Rasterizer()
image = rasterizer.combine([layer_1.astype('uint8'), layer_2.astype('uint8')])
answer = np.zeros((100, 100, 3))
answer = cv2.fillPoly(answer, pts=[np.int0(box_1)], color=(255, 255, 255))
answer = cv2.fillPoly(answer, pts=[np.int0(box_2)], color=(0, 0, 255))
answer = answer.astype('uint8')
np.testing.assert_allclose(answer, image)
|
class Test_convert_to_pixel_coords(unittest.TestCase):
def test_above_and_to_the_right(self):
location = (55, 60)
center_of_image_in_global = (50, 50)
center_of_image_in_pixels = (400, 250)
pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, center_of_image_in_pixels)
answer = (300, 300)
self.assertTupleEqual(pixels, answer)
pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, center_of_image_in_pixels, resolution=0.2)
answer = (350, 275)
self.assertTupleEqual(pixels, answer)
def test_above_and_to_the_left(self):
location = (40, 70)
center_of_image_in_global = (50, 50)
center_of_image_in_pixels = (300, 300)
pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, center_of_image_in_pixels)
answer = (100, 200)
self.assertTupleEqual(pixels, answer)
pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, center_of_image_in_pixels, resolution=0.2)
answer = (200, 250)
self.assertTupleEqual(answer, pixels)
def test_below_and_to_the_right(self):
location = (60, 45)
center_of_image_in_global = (50, 50)
center_of_image_in_pixels = (400, 250)
pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, center_of_image_in_pixels)
answer = (450, 350)
self.assertTupleEqual(pixels, answer)
def test_below_and_to_the_left(self):
location = (30, 40)
center_of_image_in_global = (50, 50)
center_of_image_in_pixels = (400, 250)
pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, center_of_image_in_pixels)
answer = (500, 50)
self.assertTupleEqual(pixels, answer)
def test_same_location(self):
location = (50, 50)
center_of_image_in_global = (50, 50)
center_of_image_in_pixels = (400, 250)
pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, center_of_image_in_pixels)
self.assertTupleEqual(pixels, (400, 250))
|
class Test_get_crops(unittest.TestCase):
def test(self):
(row_crop, col_crop) = utils.get_crops(40, 10, 25, 25, 0.1, 800)
self.assertEqual(row_crop, slice(0, 500))
self.assertEqual(col_crop, slice(150, 650))
|
def convert_to_pixel_coords(location: Tuple[(float, float)], center_of_image_in_global: Tuple[(float, float)], center_of_image_in_pixels: Tuple[(float, float)], resolution: float=0.1) -> Tuple[(int, int)]:
    '\n    Convert from global coordinates to pixel coordinates.\n    :param location: Location in global coordinates as (x, y) tuple.\n    :param center_of_image_in_global: Center of the image in global coordinates (x, y) tuple.\n    :param center_of_image_in_pixels: Center of the image in pixel coordinates (row_pixel, column_pixel).\n    :param resolution: Resolution of the image in meters / pixel.\n    :return: Location in pixel coordinates as (row_pixel, column_pixel) tuple.\n    '
(x, y) = location
x_offset = (x - center_of_image_in_global[0])
y_offset = (y - center_of_image_in_global[1])
x_pixel = (x_offset / resolution)
y_pixel = ((- y_offset) / resolution)
row_pixel = int((center_of_image_in_pixels[0] + y_pixel))
column_pixel = int((center_of_image_in_pixels[1] + x_pixel))
return (row_pixel, column_pixel)
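
def _example_pixel_coords() -> None:
    # Worked example (mirrors Test_convert_to_pixel_coords above): an agent 5 m east
    # and 10 m north of the image center at 0.1 m / pixel. North (+y) points *up*
    # the image, so the row index decreases while the column index increases.
    row, col = convert_to_pixel_coords((55, 60), (50, 50), (400, 250))
    assert (row, col) == (400 - 100, 250 + 50)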
|
def get_crops(meters_ahead: float, meters_behind: float, meters_left: float, meters_right: float, resolution: float, image_side_length_pixels: int) -> Tuple[(slice, slice)]:
    '\n    Crops the excess pixels and centers the agent at the (meters_ahead, meters_left)\n    coordinate in the image.\n    :param meters_ahead: Meters ahead of the agent.\n    :param meters_behind: Meters behind the agent.\n    :param meters_left: Meters to the left of the agent.\n    :param meters_right: Meters to the right of the agent.\n    :param resolution: Resolution of the image in meters / pixel.\n    :param image_side_length_pixels: Length of the image in pixels.\n    :return: Tuple of row and column slices to crop image.\n    '
row_crop = slice(0, int(((meters_ahead + meters_behind) / resolution)))
col_crop = slice(int(((image_side_length_pixels / 2) - (meters_left / resolution))), int(((image_side_length_pixels / 2) + (meters_right / resolution))))
return (row_crop, col_crop)
|
def get_rotation_matrix(image_shape: Tuple[(int, int, int)], yaw_in_radians: float) -> np.ndarray:
    '\n    Gets a rotation matrix to rotate a three channel image so that\n    yaw_in_radians points along the positive y-axis.\n    :param image_shape: (Length, width, n_channels).\n    :param yaw_in_radians: Angle to rotate the image by.\n    :return: The rotation matrix represented as np.ndarray.\n    '
rotation_in_degrees = ((angle_of_rotation(yaw_in_radians) * 180) / np.pi)
return cv2.getRotationMatrix2D(((image_shape[1] / 2), (image_shape[0] / 2)), rotation_in_degrees, 1)
|
def trim_network_at_index(network: nn.Module, index: int=(- 1)) -> nn.Module:
'\n Returns a new network with all layers up to index from the back.\n :param network: Module to trim.\n :param index: Where to trim the network. Counted from the last layer.\n '
assert (index < 0), f'Param index must be negative. Received {index}.'
return nn.Sequential(*list(network.children())[:index])
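
def _example_trim_backbone() -> None:
    # Usage sketch (assumes the torchvision models referenced by
    # RESNET_VERSION_TO_MODEL in ResNetBackbone below, with randomly initialised
    # weights): trimming the final fully-connected layer from resnet18 leaves the
    # convolutional trunk ending in the average pool, so the output is
    # [batch, 512, 1, 1].
    trunk = trim_network_at_index(RESNET_VERSION_TO_MODEL['resnet18'](), index=(- 1))
    features = trunk(torch.ones(1, 3, 224, 224))
    assert features.shape == (1, 512, 1, 1)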
|
def calculate_backbone_feature_dim(backbone, input_shape: Tuple[(int, int, int)]) -> int:
' Helper to calculate the shape of the fully-connected regression layer. '
tensor = torch.ones(1, *input_shape)
output_feat = backbone.forward(tensor)
return output_feat.shape[(- 1)]
|
class ResNetBackbone(nn.Module):
'\n Outputs tensor after last convolution before the fully connected layer.\n\n Allowed versions: resnet18, resnet34, resnet50, resnet101, resnet152.\n '
def __init__(self, version: str):
'\n Inits ResNetBackbone\n :param version: resnet version to use.\n '
super().__init__()
if (version not in RESNET_VERSION_TO_MODEL):
raise ValueError(f'Parameter version must be one of {list(RESNET_VERSION_TO_MODEL.keys())}. Received {version}.')
self.backbone = trim_network_at_index(RESNET_VERSION_TO_MODEL[version](), (- 1))
def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
'\n Outputs features after last convolution.\n :param input_tensor: Shape [batch_size, n_channels, length, width].\n :return: Tensor of shape [batch_size, n_convolution_filters]. For resnet50,\n the shape is [batch_size, 2048].\n '
backbone_features = self.backbone(input_tensor)
return torch.flatten(backbone_features, start_dim=1)
|
class MobileNetBackbone(nn.Module):
'\n Outputs tensor after last convolution before the fully connected layer.\n\n Allowed versions: mobilenet_v2.\n '
def __init__(self, version: str):
'\n Inits MobileNetBackbone.\n :param version: mobilenet version to use.\n '
super().__init__()
if (version != 'mobilenet_v2'):
raise NotImplementedError(f'Only mobilenet_v2 has been implemented. Received {version}.')
self.backbone = trim_network_at_index(mobilenet_v2(), (- 1))
def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
'\n Outputs features after last convolution.\n :param input_tensor: Shape [batch_size, n_channels, length, width].\n :return: Tensor of shape [batch_size, n_convolution_filters]. For mobilenet_v2,\n the shape is [batch_size, 1280].\n '
backbone_features = self.backbone(input_tensor)
return backbone_features.mean([2, 3])
|
class CoverNet(nn.Module):
' Implementation of CoverNet https://arxiv.org/pdf/1911.10298.pdf '
def __init__(self, backbone: nn.Module, num_modes: int, n_hidden_layers: List[int]=None, input_shape: Tuple[(int, int, int)]=(3, 500, 500)):
'\n Inits Covernet.\n :param backbone: Backbone model. Typically ResNetBackBone or MobileNetBackbone\n :param num_modes: Number of modes in the lattice\n :param n_hidden_layers: List of dimensions in the fully connected layers after the backbones.\n If None, set to [4096]\n :param input_shape: Shape of image input. Used to determine the dimensionality of the feature\n vector after the CNN backbone.\n '
if (n_hidden_layers and (not isinstance(n_hidden_layers, list))):
raise ValueError(f'Param n_hidden_layers must be a list. Received {type(n_hidden_layers)}')
super().__init__()
if (not n_hidden_layers):
n_hidden_layers = [4096]
self.backbone = backbone
backbone_feature_dim = calculate_backbone_feature_dim(backbone, input_shape)
n_hidden_layers = (([(backbone_feature_dim + ASV_DIM)] + n_hidden_layers) + [num_modes])
linear_layers = [nn.Linear(in_dim, out_dim) for (in_dim, out_dim) in zip(n_hidden_layers[:(- 1)], n_hidden_layers[1:])]
self.head = nn.ModuleList(linear_layers)
def forward(self, image_tensor: torch.Tensor, agent_state_vector: torch.Tensor) -> torch.Tensor:
'\n :param image_tensor: Tensor of images in the batch.\n :param agent_state_vector: Tensor of agent state vectors in the batch\n :return: Logits for the batch.\n '
backbone_features = self.backbone(image_tensor)
logits = torch.cat([backbone_features, agent_state_vector], dim=1)
for linear in self.head:
logits = linear(logits)
return logits
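
def _example_covernet_forward() -> None:
    # Hedged usage sketch (not from the paper): wires a ResNet trunk into CoverNet
    # and runs one forward pass. ASV_DIM (the agent state vector size) is a module
    # constant defined elsewhere in this file; 64 modes is an arbitrary choice here.
    backbone = ResNetBackbone('resnet18')
    model = CoverNet(backbone, num_modes=64, input_shape=(3, 500, 500))
    image = torch.zeros(2, 3, 500, 500)
    agent_state = torch.zeros(2, ASV_DIM)
    logits = model(image, agent_state)
    assert logits.shape == (2, 64)  # one logit per lattice mode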
|
def mean_pointwise_l2_distance(lattice: torch.Tensor, ground_truth: torch.Tensor) -> torch.Tensor:
    '\n    Computes the index of the closest trajectory in the lattice as measured by mean pointwise l2 distance.\n    :param lattice: Lattice of pre-generated trajectories. Shape [num_modes, n_timesteps, state_dim]\n    :param ground_truth: Ground truth trajectory of agent. Shape [1, n_timesteps, state_dim].\n    :return: Index of closest mode in the lattice.\n    '
stacked_ground_truth = ground_truth.repeat(lattice.shape[0], 1, 1)
return torch.pow((lattice - stacked_ground_truth), 2).sum(dim=2).sqrt().mean(dim=1).argmin()
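
def _example_closest_mode() -> None:
    # Illustrative check: with a two-mode lattice, a ground truth lying near the
    # second trajectory should select index 1.
    lattice = torch.tensor([[[0.0, 0.0], [1.0, 0.0]],
                            [[0.0, 1.0], [1.0, 1.0]]])       # [2 modes, 2 steps, 2 dims]
    ground_truth = torch.tensor([[[0.0, 0.9], [1.0, 1.1]]])  # [1, 2, 2]
    assert mean_pointwise_l2_distance(lattice, ground_truth).item() == 1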
|
class ConstantLatticeLoss():
'\n Computes the loss for a constant lattice CoverNet model.\n '
def __init__(self, lattice: Union[(np.ndarray, torch.Tensor)], similarity_function: Callable[([torch.Tensor, torch.Tensor], int)]=mean_pointwise_l2_distance):
'\n Inits the loss.\n :param lattice: numpy array of shape [n_modes, n_timesteps, state_dim]\n :param similarity_function: Function that computes the index of the closest trajectory in the lattice\n to the actual ground truth trajectory of the agent.\n '
self.lattice = torch.Tensor(lattice)
self.similarity_func = similarity_function
def __call__(self, batch_logits: torch.Tensor, batch_ground_truth_trajectory: torch.Tensor) -> torch.Tensor:
'\n Computes the loss on a batch.\n :param batch_logits: Tensor of shape [batch_size, n_modes]. Output of a linear layer since this class\n uses nn.functional.cross_entropy.\n :param batch_ground_truth_trajectory: Tensor of shape [batch_size, 1, n_timesteps, state_dim]\n :return: Average element-wise loss on the batch.\n '
if (self.lattice.device != batch_logits.device):
self.lattice = self.lattice.to(batch_logits.device)
batch_losses = torch.Tensor().requires_grad_(True).to(batch_logits.device)
for (logit, ground_truth) in zip(batch_logits, batch_ground_truth_trajectory):
closest_lattice_trajectory = self.similarity_func(self.lattice, ground_truth)
label = torch.LongTensor([closest_lattice_trajectory]).to(batch_logits.device)
classification_loss = f.cross_entropy(logit.unsqueeze(0), label)
batch_losses = torch.cat((batch_losses, classification_loss.unsqueeze(0)), 0)
return batch_losses.mean()
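
def _example_lattice_loss() -> None:
    # Hedged sketch: a random 3-mode lattice and a batch of one trajectory. The
    # loss reduces to an ordinary cross entropy against the index of the closest
    # lattice mode, averaged over the batch.
    lattice = torch.rand(3, 12, 2)
    loss_fn = ConstantLatticeLoss(lattice)
    logits = torch.rand(1, 3)                            # [batch_size, n_modes]
    ground_truth = lattice[0].unsqueeze(0).unsqueeze(0)  # [batch_size, 1, n_timesteps, state_dim]
    loss = loss_fn(logits, ground_truth)
    assert loss.dim() == 0  # scalar average over the batch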
|
def _kinematics_from_tokens(helper: PredictHelper, instance: str, sample: str) -> KinematicsData:
    '\n    Returns the 2D position, velocity and acceleration vectors from the given track records,\n    along with the speed, yaw rate, (scalar) acceleration (magnitude), and heading.\n    :param helper: Instance of PredictHelper.\n    :param instance: Token of instance.\n    :param sample: Token of sample.\n    :return: KinematicsData.\n    '
annotation = helper.get_sample_annotation(instance, sample)
(x, y, _) = annotation['translation']
yaw = quaternion_yaw(Quaternion(annotation['rotation']))
velocity = helper.get_velocity_for_agent(instance, sample)
acceleration = helper.get_acceleration_for_agent(instance, sample)
yaw_rate = helper.get_heading_change_rate_for_agent(instance, sample)
if np.isnan(velocity):
velocity = 0.0
if np.isnan(acceleration):
acceleration = 0.0
if np.isnan(yaw_rate):
yaw_rate = 0.0
(hx, hy) = (np.cos(yaw), np.sin(yaw))
(vx, vy) = ((velocity * hx), (velocity * hy))
(ax, ay) = ((acceleration * hx), (acceleration * hy))
return (x, y, vx, vy, ax, ay, velocity, yaw_rate, acceleration, yaw)
|
def _constant_velocity_heading_from_kinematics(kinematics_data: KinematicsData, sec_from_now: float, sampled_at: int) -> np.ndarray:
'\n Computes a constant velocity baseline for given kinematics data, time window\n and frequency.\n :param kinematics_data: KinematicsData for agent.\n :param sec_from_now: How many future seconds to use.\n :param sampled_at: Number of predictions to make per second.\n '
(x, y, vx, vy, _, _, _, _, _, _) = kinematics_data
preds = []
time_step = (1.0 / sampled_at)
for time in np.arange(time_step, (sec_from_now + time_step), time_step):
preds.append(((x + (time * vx)), (y + (time * vy))))
return np.array(preds)
|
def _constant_acceleration_and_heading(kinematics_data: KinematicsData, sec_from_now: float, sampled_at: int) -> np.ndarray:
'\n Computes a baseline prediction for the given time window and frequency, under\n the assumption that the acceleration and heading are constant.\n :param kinematics_data: KinematicsData for agent.\n :param sec_from_now: How many future seconds to use.\n :param sampled_at: Number of predictions to make per second.\n '
(x, y, vx, vy, ax, ay, _, _, _, _) = kinematics_data
preds = []
time_step = (1.0 / sampled_at)
for time in np.arange(time_step, (sec_from_now + time_step), time_step):
half_time_squared = ((0.5 * time) * time)
preds.append((((x + (time * vx)) + (half_time_squared * ax)), ((y + (time * vy)) + (half_time_squared * ay))))
return np.array(preds)
|
def _constant_speed_and_yaw_rate(kinematics_data: KinematicsData, sec_from_now: float, sampled_at: int) -> np.ndarray:
'\n Computes a baseline prediction for the given time window and frequency, under\n the assumption that the (scalar) speed and yaw rate are constant.\n :param kinematics_data: KinematicsData for agent.\n :param sec_from_now: How many future seconds to use.\n :param sampled_at: Number of predictions to make per second.\n '
(x, y, vx, vy, _, _, speed, yaw_rate, _, yaw) = kinematics_data
preds = []
time_step = (1.0 / sampled_at)
distance_step = (time_step * speed)
yaw_step = (time_step * yaw_rate)
for _ in np.arange(time_step, (sec_from_now + time_step), time_step):
x += (distance_step * np.cos(yaw))
y += (distance_step * np.sin(yaw))
preds.append((x, y))
yaw += yaw_step
return np.array(preds)
|
def _constant_magnitude_accel_and_yaw_rate(kinematics_data: KinematicsData, sec_from_now: float, sampled_at: int) -> np.ndarray:
'\n Computes a baseline prediction for the given time window and frequency, under\n the assumption that the rates of change of speed and yaw are constant.\n :param kinematics_data: KinematicsData for agent.\n :param sec_from_now: How many future seconds to use.\n :param sampled_at: Number of predictions to make per second.\n '
(x, y, vx, vy, _, _, speed, yaw_rate, accel, yaw) = kinematics_data
preds = []
time_step = (1.0 / sampled_at)
speed_step = (time_step * accel)
yaw_step = (time_step * yaw_rate)
for _ in np.arange(time_step, (sec_from_now + time_step), time_step):
distance_step = (time_step * speed)
x += (distance_step * np.cos(yaw))
y += (distance_step * np.sin(yaw))
preds.append((x, y))
speed += speed_step
yaw += yaw_step
return np.array(preds)
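
def _example_physics_baselines() -> None:
    # Illustrative comparison on toy kinematics (not from the dataset): an agent at
    # the origin heading east at 1 m/s with zero acceleration and yaw rate. All
    # four baselines then agree on the same straight-line path sampled at 2 Hz.
    kinematics = (0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0)
    expected = np.array([[0.5, 0.0], [1.0, 0.0], [1.5, 0.0], [2.0, 0.0]])
    for baseline in (_constant_velocity_heading_from_kinematics,
                     _constant_acceleration_and_heading,
                     _constant_speed_and_yaw_rate,
                     _constant_magnitude_accel_and_yaw_rate):
        np.testing.assert_allclose(baseline(kinematics, 2.0, 2), expected)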
|
class Baseline(abc.ABC):
def __init__(self, sec_from_now: float, helper: PredictHelper):
'\n Inits Baseline.\n :param sec_from_now: How many seconds into the future to make the prediction.\n :param helper: Instance of PredictHelper.\n '
        assert ((sec_from_now % 0.5) == 0), f'Parameter sec_from_now must be divisible by 0.5. Received {sec_from_now}.'
self.helper = helper
self.sec_from_now = sec_from_now
self.sampled_at = 2
@abc.abstractmethod
def __call__(self, token: str) -> Prediction:
pass
|
class ConstantVelocityHeading(Baseline):
' Makes predictions according to constant velocity and heading model. '
def __call__(self, token: str) -> Prediction:
'\n Makes prediction.\n :param token: string of format {instance_token}_{sample_token}.\n '
(instance, sample) = token.split('_')
kinematics = _kinematics_from_tokens(self.helper, instance, sample)
cv_heading = _constant_velocity_heading_from_kinematics(kinematics, self.sec_from_now, self.sampled_at)
return Prediction(instance, sample, np.expand_dims(cv_heading, 0), np.array([1]))
|
class PhysicsOracle(Baseline):
' Makes several physics-based predictions and picks the one closest to the ground truth. '
def __call__(self, token) -> Prediction:
'\n Makes prediction.\n :param token: string of format {instance_token}_{sample_token}.\n '
(instance, sample) = token.split('_')
kinematics = _kinematics_from_tokens(self.helper, instance, sample)
ground_truth = self.helper.get_future_for_agent(instance, sample, self.sec_from_now, in_agent_frame=False)
assert (ground_truth.shape[0] == int((self.sec_from_now * self.sampled_at))), f'Ground truth does not correspond to {self.sec_from_now} seconds.'
path_funs = [_constant_acceleration_and_heading, _constant_magnitude_accel_and_yaw_rate, _constant_speed_and_yaw_rate, _constant_velocity_heading_from_kinematics]
paths = [path_fun(kinematics, self.sec_from_now, self.sampled_at) for path_fun in path_funs]
oracle = sorted(paths, key=(lambda path: np.linalg.norm((np.array(path) - ground_truth), ord='fro')))[0]
return Prediction(instance, sample, np.expand_dims(oracle, 0), np.array([1]))
|
class TestBackBones(unittest.TestCase):
def count_layers(self, model):
if isinstance(model[4][0], BasicBlock):
n_convs = 2
elif isinstance(model[4][0], Bottleneck):
n_convs = 3
else:
raise ValueError('Backbone layer block not supported!')
return ((sum([len(model[i]) for i in range(4, 8)]) * n_convs) + 2)
def test_resnet(self):
rn_18 = ResNetBackbone('resnet18')
rn_34 = ResNetBackbone('resnet34')
rn_50 = ResNetBackbone('resnet50')
rn_101 = ResNetBackbone('resnet101')
rn_152 = ResNetBackbone('resnet152')
tensor = torch.ones((1, 3, 100, 100))
self.assertEqual(rn_18(tensor).shape[1], 512)
self.assertEqual(rn_34(tensor).shape[1], 512)
self.assertEqual(rn_50(tensor).shape[1], 2048)
self.assertEqual(rn_101(tensor).shape[1], 2048)
        self.assertEqual(rn_152(tensor).shape[1], 2048)
self.assertEqual(self.count_layers(list(rn_18.backbone.children())), 18)
self.assertEqual(self.count_layers(list(rn_34.backbone.children())), 34)
self.assertEqual(self.count_layers(list(rn_50.backbone.children())), 50)
self.assertEqual(self.count_layers(list(rn_101.backbone.children())), 101)
self.assertEqual(self.count_layers(list(rn_152.backbone.children())), 152)
with self.assertRaises(ValueError):
ResNetBackbone('resnet51')
def test_mobilenet(self):
mobilenet = MobileNetBackbone('mobilenet_v2')
tensor = torch.ones((1, 3, 100, 100))
self.assertEqual(mobilenet(tensor).shape[1], 1280)
|
class TestPhysicsBaselines(unittest.TestCase):
def test_Baselines_raise_error_when_sec_from_now_bad(self):
with self.assertRaises(AssertionError):
ConstantVelocityHeading(2.23, None)
with self.assertRaises(AssertionError):
PhysicsOracle(2.25, None)
PhysicsOracle(5.5, None)
ConstantVelocityHeading(3, None)
@patch('nuscenes.prediction.models.physics._kinematics_from_tokens')
def test_ConstantVelocityHeading(self, mock_kinematics):
mock_helper = MagicMock(spec=PredictHelper)
mock_helper.get_sample_annotation.return_value = {'translation': [0, 0, 0], 'rotation': [1, 0, 0, 0]}
mock_kinematics.return_value = (0, 0, 1, 0, 2, 0, 1, 0, 2, 0)
cv_model = ConstantVelocityHeading(6, mock_helper)
prediction = cv_model('foo-instance_bar-sample')
answer = np.array([[[0.5, 0], [1, 0], [1.5, 0], [2.0, 0], [2.5, 0], [3.0, 0], [3.5, 0.0], [4.0, 0], [4.5, 0], [5.0, 0], [5.5, 0], [6.0, 0]]])
np.testing.assert_allclose(answer, np.round(prediction.prediction, 3))
@patch('nuscenes.prediction.models.physics._kinematics_from_tokens')
def test_PhysicsOracle(self, mock_kinematics):
mock_helper = MagicMock(spec=PredictHelper)
mock_helper.get_sample_annotation.return_value = {'translation': [0, 0, 0], 'rotation': [1, 0, 0, 0]}
mock_helper.get_future_for_agent.return_value = np.array([[0, 1.3], [0, 2.9], [0, 5.2], [0, 8.3], [0, 11.3], [0, 14.6], [0, 19.29], [0, 23.7], [0, 29.19], [0, 33.0], [0, 41.3], [0, 48.2]])
mock_kinematics.return_value = (0, 0, 0, 2, 0, 2, 2, 0.05, 2, 0)
oracle = PhysicsOracle(6, mock_helper)
prediction = oracle('foo-instance_bar-sample')
answer = np.array([[[0.0, 1.25], [0.0, 3.0], [0.0, 5.25], [0.0, 8.0], [0.0, 11.25], [0.0, 15.0], [0.0, 19.25], [0.0, 24.0], [0.0, 29.25], [0.0, 35.0], [0.0, 41.25], [0.0, 48.0]]])
np.testing.assert_allclose(answer, np.round(prediction.prediction, 3))
@patch('nuscenes.prediction.models.physics._kinematics_from_tokens')
def test_PhysicsOracle_raises_error_when_not_enough_gt(self, mock_kinematics):
mock_helper = MagicMock(spec=PredictHelper)
mock_helper.get_sample_annotation.return_value = {'translation': [0, 0, 0], 'rotation': [1, 0, 0, 0]}
mock_helper.get_future_for_agent.return_value = np.array([[0, 1.3], [0, 2.9], [0, 5.2], [0, 8.3], [0, 11.3], [0, 14.6], [0, 19.29], [0, 23.7], [0, 29.19], [0, 33.0]])
mock_kinematics.return_value = (0, 0, 0, 2, 0, 2, 2, 0.05, 2, 0)
oracle = PhysicsOracle(6, mock_helper)
with self.assertRaises(AssertionError):
oracle('foo-instance_bar-sample')
|
def export_ego_poses(nusc: NuScenes, out_dir: str):
    ' Renders where the ego vehicle drives on each map and saves the renderings to out_dir. '
locations = np.unique([log['location'] for log in nusc.log])
if (not os.path.isdir(out_dir)):
os.makedirs(out_dir)
for location in locations:
print('Rendering map {}...'.format(location))
nusc.render_egoposes_on_map(location)
out_path = os.path.join(out_dir, 'egoposes-{}.png'.format(location))
plt.tight_layout()
plt.savefig(out_path)
|
def get_poses(nusc: NuScenes, scene_token: str) -> List[dict]:
'\n Return all ego poses for the current scene.\n :param nusc: The NuScenes instance to load the ego poses from.\n :param scene_token: The token of the scene.\n :return: A list of the ego pose dicts.\n '
pose_list = []
scene_rec = nusc.get('scene', scene_token)
sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
ego_pose = nusc.get('ego_pose', sd_rec['token'])
pose_list.append(ego_pose)
while (sd_rec['next'] != ''):
sd_rec = nusc.get('sample_data', sd_rec['next'])
ego_pose = nusc.get('ego_pose', sd_rec['token'])
pose_list.append(ego_pose)
return pose_list
|
def get_coordinate(ref_lat: float, ref_lon: float, bearing: float, dist: float) -> Tuple[(float, float)]:
    '\n    Using a reference coordinate, extract the coordinates of another point in space given its distance and bearing\n    to the reference coordinate. For reference, please see: https://www.movable-type.co.uk/scripts/latlong.html.\n    :param ref_lat: Latitude of the reference coordinate in degrees, e.g. 42.3368.\n    :param ref_lon: Longitude of the reference coordinate in degrees, e.g. 71.0578.\n    :param bearing: The clockwise angle in radians from north to the target point, as seen from the reference point.\n    :param dist: The distance in meters from the reference point to the target point.\n    :return: A tuple of lat and lon.\n    '
(lat, lon) = (math.radians(ref_lat), math.radians(ref_lon))
angular_distance = (dist / EARTH_RADIUS_METERS)
target_lat = math.asin(((math.sin(lat) * math.cos(angular_distance)) + ((math.cos(lat) * math.sin(angular_distance)) * math.cos(bearing))))
target_lon = (lon + math.atan2(((math.sin(bearing) * math.sin(angular_distance)) * math.cos(lat)), (math.cos(angular_distance) - (math.sin(lat) * math.sin(target_lat)))))
return (math.degrees(target_lat), math.degrees(target_lon))
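
def _example_get_coordinate() -> None:
    # Sanity sketch: on a sphere, moving due north (bearing 0) by an arc length of
    # EARTH_RADIUS_METERS * pi / 180 is exactly one degree of latitude, leaving the
    # longitude unchanged. The reference point here is arbitrary.
    lat, lon = get_coordinate(42.0, (- 71.0), bearing=0.0, dist=((EARTH_RADIUS_METERS * math.pi) / 180))
    assert abs(lat - 43.0) < 1e-06
    assert abs(lon - (- 71.0)) < 1e-06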
|
def derive_latlon(location: str, poses: List[Dict[(str, float)]]) -> List[Dict[(str, float)]]:
"\n For each pose value, extract its respective lat/lon coordinate and timestamp.\n \n This makes the following two assumptions in order to work:\n 1. The reference coordinate for each map is in the south-western corner.\n 2. The origin of the global poses is also in the south-western corner (and identical to 1).\n\n :param location: The name of the map the poses correspond to, ie: 'boston-seaport'.\n :param poses: All nuScenes egopose dictionaries of a scene.\n :return: A list of dicts (lat/lon coordinates and timestamps) for each pose.\n "
assert (location in REFERENCE_COORDINATES.keys()), f'Error: The given location: {location}, has no available reference.'
coordinates = []
(reference_lat, reference_lon) = REFERENCE_COORDINATES[location]
for p in poses:
ts = p['timestamp']
(x, y) = p['translation'][:2]
bearing = math.atan((x / y))
distance = math.sqrt(((x ** 2) + (y ** 2)))
(lat, lon) = get_coordinate(reference_lat, reference_lon, bearing, distance)
coordinates.append({'timestamp': ts, 'latitude': lat, 'longitude': lon})
return coordinates
|
def export_kml(coordinates_per_location: Dict[(str, Dict[(str, List[Dict[(str, float)]])])], output_path: str) -> None:
'\n Export the coordinates of a scene to .kml file.\n :param coordinates_per_location: A dict of lat/lon coordinate dicts for each scene.\n :param output_path: Path of the kml file to write to disk.\n '
result = f'''<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>nuScenes ego poses</name>
'''
for (location, coordinates_per_scene) in coordinates_per_location.items():
result += f''' <Folder>
<name>{location}</name>
'''
for (scene_name, coordinates) in coordinates_per_scene.items():
result += f''' <Placemark>
<name>{scene_name}</name>
<LineString>
<tessellate>1</tessellate>
<coordinates>
'''
for coordinate in coordinates:
coordinates_str = ('%.10f,%.10f,%d' % (coordinate['longitude'], coordinate['latitude'], 0))
result += f''' {coordinates_str}
'''
result += f''' </coordinates>
</LineString>
</Placemark>
'''
result += f''' </Folder>
'''
result += f''' </Document>
</kml>'''
with open(output_path, 'w') as f:
f.write(result)
|
def main(dataroot: str, version: str, output_prefix: str, output_format: str='kml') -> None:
    '\n    Extract the latlon coordinates for each available pose and write the results to a file.\n    The file is organized by location and scene_name.\n    :param dataroot: Path of the nuScenes dataset.\n    :param version: NuScenes version.\n    :param output_prefix: Where to save the output file (without the file extension).\n    :param output_format: The output file format, kml or json.\n    '
nusc = NuScenes(dataroot=dataroot, version=version, verbose=False)
coordinates_per_location = {}
    print('Extracting coordinates...')
for scene in tqdm(nusc.scene):
scene_name = scene['name']
scene_token = scene['token']
location = nusc.get('log', scene['log_token'])['location']
poses = get_poses(nusc, scene_token)
coordinates = derive_latlon(location, poses)
if (location not in coordinates_per_location):
coordinates_per_location[location] = {}
coordinates_per_location[location][scene_name] = coordinates
dest_dir = os.path.dirname(output_prefix)
if ((dest_dir != '') and (not os.path.exists(dest_dir))):
os.makedirs(dest_dir)
output_path = f'{output_prefix}_{version}.{output_format}'
if (output_format == 'json'):
with open(output_path, 'w') as fh:
json.dump(coordinates_per_location, fh, sort_keys=True, indent=4)
elif (output_format == 'kml'):
export_kml(coordinates_per_location, output_path)
else:
        raise ValueError('Error: Invalid output format: %s' % output_format)
print(f'Saved the coordinates in {output_path}')
|
def export_videos(nusc: NuScenes, out_dir: str):
    ' Exports a rendered video for each scene. '
scene_tokens = [s['token'] for s in nusc.scene]
if (not os.path.isdir(out_dir)):
os.makedirs(out_dir)
for scene_token in scene_tokens:
scene = nusc.get('scene', scene_token)
print(('Writing scene %s' % scene['name']))
out_path = (os.path.join(out_dir, scene['name']) + '.avi')
if (not os.path.exists(out_path)):
nusc.render_scene(scene['token'], out_path=out_path)
|
def verify_setup(nusc: NuScenes):
'\n Script to verify that the nuScenes installation is complete.\n '
print('Checking that sample_data files are complete...')
for sd in tqdm(nusc.sample_data):
file_path = os.path.join(nusc.dataroot, sd['filename'])
assert os.path.exists(file_path), ('Error: Missing sample_data at: %s' % file_path)
print('Checking that map files are complete...')
    for map_record in tqdm(nusc.map):
        file_path = os.path.join(nusc.dataroot, map_record['filename'])
assert os.path.exists(file_path), ('Error: Missing map at: %s' % file_path)
|
class TestNuScenesLidarseg(unittest.TestCase):
def setUp(self):
assert ('NUSCENES' in os.environ), 'Set NUSCENES env. variable to enable tests.'
self.nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=False)
def test_num_classes(self) -> None:
        '\n        Check that the correct number of classes (32) is loaded.\n        '
self.assertEqual(len(self.nusc.lidarseg_idx2name_mapping), 32)
def test_num_colors(self) -> None:
'\n Check that the number of colors in the colormap matches the number of classes.\n '
num_classes = len(self.nusc.lidarseg_idx2name_mapping)
num_colors = len(self.nusc.colormap)
self.assertEqual(num_colors, num_classes)
def test_classes(self) -> None:
'\n Check that the class names match the ones in the colormap, and are in the same order.\n '
classes_in_colormap = list(self.nusc.colormap.keys())
for (name, idx) in self.nusc.lidarseg_name2idx_mapping.items():
self.assertEqual(name, classes_in_colormap[idx])
|
class TestNuScenes(unittest.TestCase):
def test_load(self):
'\n Loads up NuScenes.\n This is intended to simply run the NuScenes class to check for import errors, typos, etc.\n '
assert ('NUSCENES' in os.environ), 'Set NUSCENES env. variable to enable tests.'
nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=False)
self.assertEqual(nusc.table_root, os.path.join(os.environ['NUSCENES'], 'v1.0-mini'))
|
def get_colormap() -> Dict[(str, Tuple[(int, int, int)])]:
'\n Get the defined colormap.\n :return: A mapping from the class names to the respective RGB values.\n '
classname_to_color = {'noise': (0, 0, 0), 'animal': (70, 130, 180), 'human.pedestrian.adult': (0, 0, 230), 'human.pedestrian.child': (135, 206, 235), 'human.pedestrian.construction_worker': (100, 149, 237), 'human.pedestrian.personal_mobility': (219, 112, 147), 'human.pedestrian.police_officer': (0, 0, 128), 'human.pedestrian.stroller': (240, 128, 128), 'human.pedestrian.wheelchair': (138, 43, 226), 'movable_object.barrier': (112, 128, 144), 'movable_object.debris': (210, 105, 30), 'movable_object.pushable_pullable': (105, 105, 105), 'movable_object.trafficcone': (47, 79, 79), 'static_object.bicycle_rack': (188, 143, 143), 'vehicle.bicycle': (220, 20, 60), 'vehicle.bus.bendy': (255, 127, 80), 'vehicle.bus.rigid': (255, 69, 0), 'vehicle.car': (255, 158, 0), 'vehicle.construction': (233, 150, 70), 'vehicle.emergency.ambulance': (255, 83, 0), 'vehicle.emergency.police': (255, 215, 0), 'vehicle.motorcycle': (255, 61, 99), 'vehicle.trailer': (255, 140, 0), 'vehicle.truck': (255, 99, 71), 'flat.driveable_surface': (0, 207, 191), 'flat.other': (175, 0, 75), 'flat.sidewalk': (75, 0, 75), 'flat.terrain': (112, 180, 60), 'static.manmade': (222, 184, 135), 'static.other': (255, 228, 196), 'static.vegetation': (0, 175, 0), 'vehicle.ego': (255, 240, 245)}
return classname_to_color
|
def load_bin_file(bin_path: str, type: str='lidarseg') -> np.ndarray:
"\n Loads a .bin file containing the lidarseg or lidar panoptic labels.\n :param bin_path: Path to the .bin file.\n :param type: semantic type, 'lidarseg': stored in 8-bit format, 'panoptic': store in 32-bit format.\n :return: An array containing the labels, with dtype of np.uint8 for lidarseg and np.int32 for panoptic.\n "
assert os.path.exists(bin_path), 'Error: Unable to find {}.'.format(bin_path)
if (type == 'lidarseg'):
bin_content = np.fromfile(bin_path, dtype=np.uint8)
elif (type == 'panoptic'):
bin_content = np.load(bin_path)['data']
else:
raise TypeError(f'Only lidarseg/panoptic type is supported, received {type}')
assert (len(bin_content) > 0), 'Error: {} is empty.'.format(bin_path)
return bin_content
|
def panoptic_to_lidarseg(panoptic_labels: np.ndarray) -> np.ndarray:
'\n Convert panoptic label array to lidarseg label array\n :param panoptic_labels: <np.array, HxW, np.uint16>, encoded in (instance_id + 1000 * category_idx), note instance_id\n for stuff points is 0.\n :return: lidarseg semantic labels, <np.array, HxW, np.uint8>.\n '
return (panoptic_labels // 1000).astype(np.uint8)
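
def _example_panoptic_roundtrip() -> None:
    # Illustrative: panoptic labels pack (category_idx, instance_id) as
    # category_idx * 1000 + instance_id, so integer division by 1000 recovers the
    # semantic class; stuff points (instance_id 0) are unaffected.
    panoptic = np.array([0, 1001, 24017, 31000], dtype=np.int32)
    semantic = panoptic_to_lidarseg(panoptic)
    np.testing.assert_array_equal(semantic, np.array([0, 1, 24, 31], dtype=np.uint8))
    assert semantic.dtype == np.uint8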
|
def create_splits_logs(split: str, nusc: 'NuScenes') -> List[str]:
'\n Returns the logs in each dataset split of nuScenes.\n Note: Previously this script included the teaser dataset splits. Since new scenes from those logs were added and\n others removed in the full dataset, that code is incompatible and was removed.\n :param split: NuScenes split.\n :param nusc: NuScenes instance.\n :return: A list of logs in that split.\n '
scene_splits = create_splits_scenes(verbose=False)
assert (split in scene_splits.keys()), 'Requested split {} which is not a known nuScenes split.'.format(split)
version = nusc.version
if (split in {'train', 'val', 'train_detect', 'train_track'}):
assert version.endswith('trainval'), 'Requested split {} which is not compatible with NuScenes version {}'.format(split, version)
elif (split in {'mini_train', 'mini_val'}):
assert version.endswith('mini'), 'Requested split {} which is not compatible with NuScenes version {}'.format(split, version)
elif (split == 'test'):
assert version.endswith('test'), 'Requested split {} which is not compatible with NuScenes version {}'.format(split, version)
else:
raise ValueError('Requested split {} which this function cannot map to logs.'.format(split))
scene_to_log = {scene['name']: nusc.get('log', scene['log_token'])['logfile'] for scene in nusc.scene}
logs = set()
scenes = scene_splits[split]
for scene in scenes:
logs.add(scene_to_log[scene])
return list(logs)
|
def create_splits_scenes(verbose: bool=False) -> Dict[(str, List[str])]:
'\n Similar to create_splits_logs, but returns a mapping from split to scene names, rather than log names.\n The splits are as follows:\n - train/val/test: The standard splits of the nuScenes dataset (700/150/150 scenes).\n - mini_train/mini_val: Train and val splits of the mini subset used for visualization and debugging (8/2 scenes).\n - train_detect/train_track: Two halves of the train split used for separating the training sets of detector and\n tracker if required.\n :param verbose: Whether to print out statistics on a scene level.\n :return: A mapping from split name to a list of scenes names in that split.\n '
all_scenes = ((train + val) + test)
assert ((len(all_scenes) == 1000) and (len(set(all_scenes)) == 1000)), 'Error: Splits incomplete!'
scene_splits = {'train': train, 'val': val, 'test': test, 'mini_train': mini_train, 'mini_val': mini_val, 'train_detect': train_detect, 'train_track': train_track}
if verbose:
for (split, scenes) in scene_splits.items():
print(('%s: %d' % (split, len(scenes))))
print(('%s' % scenes))
return scene_splits
|
class TestDataClasses(unittest.TestCase):
def test_load_pointclouds(self):
'\n Loads up lidar and radar pointclouds.\n '
assert ('NUSCENES' in os.environ), 'Set NUSCENES env. variable to enable tests.'
dataroot = os.environ['NUSCENES']
nusc = NuScenes(version='v1.0-mini', dataroot=dataroot, verbose=False)
sample_rec = nusc.sample[0]
lidar_name = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])['filename']
radar_name = nusc.get('sample_data', sample_rec['data']['RADAR_FRONT'])['filename']
lidar_path = os.path.join(dataroot, lidar_name)
radar_path = os.path.join(dataroot, radar_name)
pc1 = LidarPointCloud.from_file(lidar_path)
pc2 = RadarPointCloud.from_file(radar_path)
(pc3, _) = LidarPointCloud.from_file_multisweep(nusc, sample_rec, 'LIDAR_TOP', 'LIDAR_TOP', nsweeps=2)
(pc4, _) = RadarPointCloud.from_file_multisweep(nusc, sample_rec, 'RADAR_FRONT', 'RADAR_FRONT', nsweeps=2)
assert (pc1.points.shape[0] == pc3.points.shape[0] == 4), 'Error: Invalid dimension for lidar pointcloud!'
assert (pc2.points.shape[0] == pc4.points.shape[0] == 18), 'Error: Invalid dimension for radar pointcloud!'
assert (pc1.points.dtype == pc3.points.dtype), 'Error: Invalid dtype for lidar pointcloud!'
assert (pc2.points.dtype == pc4.points.dtype), 'Error: Invalid dtype for radar pointcloud!'
|
class TestGeometryUtils(unittest.TestCase):
def test_quaternion_yaw(self):
'Test valid and invalid inputs for quaternion_yaw().'
for yaw_in in np.linspace((- 10), 10, 100):
q = Quaternion(axis=(0, 0, 1), angle=yaw_in)
yaw_true = (yaw_in % (2 * np.pi))
if (yaw_true > np.pi):
yaw_true -= (2 * np.pi)
yaw_test = quaternion_yaw(q)
self.assertAlmostEqual(yaw_true, yaw_test)
yaw_in = (np.pi / 4)
q = Quaternion(axis=(0, 0, 0.5), angle=yaw_in)
yaw_test = quaternion_yaw(q)
self.assertAlmostEqual(yaw_in, yaw_test)
yaw_in = (np.pi / 4)
q = Quaternion(axis=(0, 0, (- 1)), angle=yaw_in)
yaw_test = (- quaternion_yaw(q))
self.assertAlmostEqual(yaw_in, yaw_test)
yaw_in = (np.pi / 4)
q = Quaternion(axis=(0, 1, 0), angle=yaw_in)
yaw_test = quaternion_yaw(q)
self.assertAlmostEqual(0, yaw_test)
yaw_in = (np.pi / 2)
q = Quaternion(axis=(0, 1, 1), angle=yaw_in)
yaw_test = quaternion_yaw(q)
self.assertAlmostEqual(yaw_in, yaw_test)
yaw_in = (np.pi / 2)
q = (Quaternion(axis=(0, 0, 1), angle=yaw_in) * Quaternion(axis=(0, 1, 0), angle=0.5821))
yaw_test = quaternion_yaw(q)
self.assertAlmostEqual(yaw_in, yaw_test)
def test_points_in_box(self):
        ' Test the points_in_box method. '
vel = (np.nan, np.nan, np.nan)
def qyaw(yaw):
return Quaternion(axis=(0, 0, 1), angle=yaw)
box = Box([0.0, 0.0, 0.0], [2.0, 2.0, 0.0], qyaw(0.0), 1, 2.0, vel)
points = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.0]]).transpose()
mask = points_in_box(box, points, wlh_factor=1.0)
self.assertEqual(mask.all(), True)
box = Box([0.0, 0.0, 0.0], [2.0, 2.0, 0.0], qyaw(0.0), 1, 2.0, vel)
points = np.array([[0.1, 0.0, 0.0], [0.5, (- 1.1), 0.0]]).transpose()
mask = points_in_box(box, points, wlh_factor=1.0)
self.assertEqual(mask.all(), False)
box = Box([0.0, 0.0, 0.0], [2.0, 2.0, 0.0], qyaw(0.0), 1, 2.0, vel)
points = np.array([[(- 1.0), (- 1.0), 0.0], [1.0, 1.0, 0.0]]).transpose()
mask = points_in_box(box, points, wlh_factor=1.0)
self.assertEqual(mask.all(), True)
rot = 45
trans = [1.0, 1.0]
box = Box([(0.0 + trans[0]), (0.0 + trans[1]), 0.0], [2.0, 2.0, 0.0], qyaw(((rot / 180.0) * np.pi)), 1, 2.0, vel)
        points = np.array([[(0.7 + trans[0]), (0.7 + trans[1]), 0.0], [(0.71 + trans[0]), (0.71 + trans[1]), 0.0]]).transpose()
mask = points_in_box(box, points, wlh_factor=1.0)
self.assertEqual(mask[0], True)
self.assertEqual(mask[1], False)
box = Box([0.0, 0.0, 0.0], [2.0, 2.0, 2.0], qyaw(0.0), 1, 2.0, vel)
points = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]).transpose()
mask = points_in_box(box, points, wlh_factor=1.0)
self.assertEqual(mask.all(), True)
for wlh_factor in [0.5, 1.0, 1.5, 10.0]:
box = Box([0.0, 0.0, 0.0], [2.0, 2.0, 0.0], qyaw(0.0), 1, 2.0, vel)
points = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.0]]).transpose()
mask = points_in_box(box, points, wlh_factor=wlh_factor)
self.assertEqual(mask.all(), True)
for wlh_factor in [0.1, 0.49]:
box = Box([0.0, 0.0, 0.0], [2.0, 2.0, 0.0], qyaw(0.0), 1, 2.0, vel)
points = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.0]]).transpose()
mask = points_in_box(box, points, wlh_factor=wlh_factor)
self.assertEqual(mask[0], True)
self.assertEqual(mask[1], False)
|
class TestLoad(unittest.TestCase):
fixture = 'testmap.png'
foreground = 255
native_res = 0.1
small_number = 1e-05
half_gt = ((native_res / 2) + small_number)
half_lt = ((native_res / 2) - small_number)
def setUp(self):
mask = np.zeros((50, 40))
mask[(30, 20)] = self.foreground
cv2.imwrite(filename=self.fixture, img=mask)
def tearDown(self):
os.remove(self.fixture)
def test_native_resolution(self):
map_mask = MapMask(self.fixture, resolution=0.1)
self.assertTrue(map_mask.is_on_mask(2, 2))
self.assertTrue(map_mask.is_on_mask((2 + self.half_lt), 2))
self.assertTrue(map_mask.is_on_mask((2 - self.half_lt), 2))
self.assertTrue(map_mask.is_on_mask(2, (2 + self.half_lt)))
self.assertTrue(map_mask.is_on_mask(2, (2 - self.half_lt)))
        self.assertFalse(map_mask.is_on_mask((2 + self.half_gt), 2))
        self.assertFalse(map_mask.is_on_mask((2 - self.half_gt), 2))
        self.assertFalse(map_mask.is_on_mask(2, (2 + self.half_gt)))
        self.assertFalse(map_mask.is_on_mask(2, (2 - self.half_gt)))
def test_edges(self):
mask = (np.ones((50, 40)) * self.foreground)
cv2.imwrite(filename=self.fixture, img=mask)
map_mask = MapMask(self.fixture, resolution=0.1)
self.assertTrue(map_mask.is_on_mask(0, 0.1))
self.assertTrue(map_mask.is_on_mask(0, 5))
self.assertTrue(map_mask.is_on_mask(3.9, 0.1))
self.assertTrue(map_mask.is_on_mask(3.9, 5))
self.assertFalse(map_mask.is_on_mask((3.9 + self.half_gt), 0.1))
self.assertFalse(map_mask.is_on_mask((3.9 + self.half_gt), 5))
self.assertFalse(map_mask.is_on_mask((0 - self.half_gt), 0.1))
self.assertFalse(map_mask.is_on_mask((0 - self.half_gt), 5))
def test_dilation(self):
map_mask = MapMask(self.fixture, resolution=0.1)
self.assertTrue(map_mask.is_on_mask(2, 2))
self.assertFalse(map_mask.is_on_mask(2, 3))
self.assertTrue(map_mask.is_on_mask(2, 3, dilation=1))
self.assertTrue(map_mask.is_on_mask(3, 2, dilation=1))
self.assertTrue(map_mask.is_on_mask((2 + np.sqrt((1 / 2))), (2 + np.sqrt((1 / 2))), dilation=1))
self.assertFalse(map_mask.is_on_mask(2, 3, dilation=0.9))
def test_coarse_resolution(self):
mask = np.zeros((50, 40))
mask[(30, 20)] = self.foreground
mask[(31, 20)] = self.foreground
mask[(30, 21)] = self.foreground
mask[(31, 21)] = self.foreground
cv2.imwrite(filename=self.fixture, img=mask)
map_mask = MapMask(self.fixture, resolution=0.2)
self.assertTrue(map_mask.is_on_mask(2, 2))
self.assertFalse(map_mask.is_on_mask(2, 4))
self.assertTrue(map_mask.is_on_mask(2, 4, dilation=2))
self.assertFalse(map_mask.is_on_mask(2, 4, dilation=1.9))
|
def knn_gather_wrapper(som_node, som_node_knn_I):
    '\n    :param som_node: Bx3xN\n    :param som_node_knn_I: BxNxK\n    :return: som_node_neighbors: Bx3xNxK\n    '
    assert (som_node.size()[1] == 3)
som_node_neighbors = knn_gather_by_indexing(som_node, som_node_knn_I)
return som_node_neighbors
|
def knn_gather_by_indexing(som_node, som_node_knn_I):
    '\n    :param som_node: BxCxN\n    :param som_node_knn_I: BxNxK\n    :return: som_node_neighbors: BxCxNxK\n    '
B = som_node.size()[0]
C = som_node.size()[1]
N = som_node.size()[2]
K = som_node_knn_I.size()[2]
som_node_knn_I = som_node_knn_I.unsqueeze(1).expand(B, C, N, K).contiguous().view(B, C, (N * K))
som_node_neighbors = torch.gather(som_node, dim=2, index=som_node_knn_I).view(B, C, N, K)
return som_node_neighbors
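
def _example_knn_gather() -> None:
    # Shape sketch: gather K=2 neighbors for each of N=4 nodes from a batch of
    # B=1 point sets with C=3 channels. Output entry [b, :, n, k] is the node at
    # index som_node_knn_I[b, n, k].
    som_node = torch.arange(12, dtype=torch.float32).view(1, 3, 4)     # Bx3xN
    som_node_knn_I = torch.tensor([[[0, 1], [1, 2], [2, 3], [3, 0]]])  # BxNxK
    neighbors = knn_gather_by_indexing(som_node, som_node_knn_I)       # BxCxNxK
    assert neighbors.shape == (1, 3, 4, 2)
    assert torch.equal(neighbors[0, :, 0, 1], som_node[0, :, 1])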
|
class Options():
def __init__(self):
self.is_debug = False
self.is_fine_resolution = True
self.is_remove_ground = False
self.accumulation_frame_num = 3
self.accumulation_frame_skip = 6
self.delta_ij_max = 40
self.translation_max = 10.0
self.crop_original_top_rows = 50
self.img_scale = 0.5
self.img_H = 160
self.img_W = 512
self.img_fine_resolution_scale = 32
self.input_pt_num = 40960
self.pc_min_range = (- 1.0)
self.pc_max_range = 80.0
self.node_a_num = 128
self.node_b_num = 128
self.k_ab = 16
self.k_interp_ab = 3
self.k_interp_point_a = 3
self.k_interp_point_b = 3
self.P_tx_amplitude = 0
self.P_ty_amplitude = 0
self.P_tz_amplitude = 0
self.P_Rx_amplitude = ((0.0 * math.pi) / 12.0)
self.P_Ry_amplitude = (2.0 * math.pi)
self.P_Rz_amplitude = ((0.0 * math.pi) / 12.0)
self.dataloader_threads = 10
self.batch_size = 8
self.gpu_ids = [0]
self.device = torch.device('cuda', self.gpu_ids[0])
self.normalization = 'batch'
self.norm_momentum = 0.1
self.activation = 'relu'
self.lr = 0.001
self.lr_decay_step = 20
self.lr_decay_scale = 0.5
self.vis_max_batch = 4
if self.is_fine_resolution:
self.coarse_loss_alpha = 50
else:
self.coarse_loss_alpha = 1
|
class Shapes(object):
def __init__(self, dataset_zip=None):
loc = 'data/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz'
if (dataset_zip is None):
self.dataset_zip = np.load(loc, encoding='latin1')
else:
self.dataset_zip = dataset_zip
self.imgs = torch.from_numpy(self.dataset_zip['imgs']).float()
def __len__(self):
return self.imgs.size(0)
def __getitem__(self, index):
x = self.imgs[index].view(1, 64, 64)
return x
|
class Dataset(object):
def __init__(self, loc):
self.dataset = torch.load(loc).float().div(255).view((- 1), 1, 64, 64)
def __len__(self):
return self.dataset.size(0)
@property
def ndim(self):
return self.dataset.size(1)
def __getitem__(self, index):
return self.dataset[index]
|
class Faces(Dataset):
LOC = 'data/basel_face_renders.pth'
def __init__(self):
        super(Faces, self).__init__(self.LOC)
|
class Normal(nn.Module):
'Samples from a Normal distribution using the reparameterization trick.\n '
def __init__(self, mu=0, sigma=1):
super(Normal, self).__init__()
self.normalization = Variable(torch.Tensor([np.log((2 * np.pi))]))
self.mu = Variable(torch.Tensor([mu]))
self.logsigma = Variable(torch.Tensor([math.log(sigma)]))
def _check_inputs(self, size, mu_logsigma):
if ((size is None) and (mu_logsigma is None)):
raise ValueError('Either one of size or params should be provided.')
elif ((size is not None) and (mu_logsigma is not None)):
mu = mu_logsigma.select((- 1), 0).expand(size)
logsigma = mu_logsigma.select((- 1), 1).expand(size)
return (mu, logsigma)
elif (size is not None):
mu = self.mu.expand(size)
logsigma = self.logsigma.expand(size)
return (mu, logsigma)
elif (mu_logsigma is not None):
mu = mu_logsigma.select((- 1), 0)
logsigma = mu_logsigma.select((- 1), 1)
return (mu, logsigma)
else:
raise ValueError('Given invalid inputs: size={}, mu_logsigma={})'.format(size, mu_logsigma))
def sample(self, size=None, params=None):
(mu, logsigma) = self._check_inputs(size, params)
std_z = Variable(torch.randn(mu.size()).type_as(mu.data))
sample = ((std_z * torch.exp(logsigma)) + mu)
return sample
def log_density(self, sample, params=None):
if (params is not None):
(mu, logsigma) = self._check_inputs(None, params)
else:
(mu, logsigma) = self._check_inputs(sample.size(), None)
mu = mu.type_as(sample)
logsigma = logsigma.type_as(sample)
c = self.normalization.type_as(sample.data)
inv_sigma = torch.exp((- logsigma))
tmp = ((sample - mu) * inv_sigma)
return ((- 0.5) * (((tmp * tmp) + (2 * logsigma)) + c))
def NLL(self, params, sample_params=None):
'Analytically computes\n E_N(mu_2,sigma_2^2) [ - log N(mu_1, sigma_1^2) ]\n If mu_2, and sigma_2^2 are not provided, defaults to entropy.\n '
(mu, logsigma) = self._check_inputs(None, params)
if (sample_params is not None):
(sample_mu, sample_logsigma) = self._check_inputs(None, sample_params)
else:
(sample_mu, sample_logsigma) = (mu, logsigma)
c = self.normalization.type_as(sample_mu.data)
nll = ((((logsigma.mul((- 2)).exp() * (sample_mu - mu).pow(2)) + torch.exp((sample_logsigma.mul(2) - logsigma.mul(2)))) + (2 * logsigma)) + c)
return nll.mul(0.5)
def kld(self, params):
'Computes KL(q||p) where q is the given distribution and p\n is the standard Normal distribution.\n '
(mu, logsigma) = self._check_inputs(None, params)
kld = ((logsigma.mul(2).add(1) - mu.pow(2)) - logsigma.exp().pow(2))
kld.mul_((- 0.5))
return kld
def get_params(self):
return torch.cat([self.mu, self.logsigma])
@property
def nparams(self):
return 2
@property
def ndim(self):
return 1
@property
def is_reparameterizable(self):
return True
def __repr__(self):
tmpstr = (self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(self.mu.data[0], self.logsigma.exp().data[0]))
return tmpstr
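
def _example_normal_reparam() -> None:
    # Illustrative sketch using the legacy Variable-style API of this file: params
    # stack mu at [..., 0] and log sigma at [..., 1]. With mu = 0 and log sigma = 0
    # the log density at zero is the standard-Normal value -0.5 * log(2 * pi).
    normal = Normal()
    params = Variable(torch.zeros(5, 2))
    sample = normal.sample(params=params)
    assert sample.size() == (5,)
    logp = normal.log_density(Variable(torch.zeros(1)))
    assert abs(float(logp.data[0]) - ((- 0.5) * math.log((2 * math.pi)))) < 1e-06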
|
class Laplace(nn.Module):
'Samples from a Laplace distribution using the reparameterization trick.\n '
def __init__(self, mu=0, scale=1):
super(Laplace, self).__init__()
self.normalization = Variable(torch.Tensor([(- math.log(2))]))
self.mu = Variable(torch.Tensor([mu]))
self.logscale = Variable(torch.Tensor([math.log(scale)]))
def _check_inputs(self, size, mu_logscale):
if ((size is None) and (mu_logscale is None)):
raise ValueError('Either one of size or params should be provided.')
elif ((size is not None) and (mu_logscale is not None)):
mu = mu_logscale.select((- 1), 0).expand(size)
logscale = mu_logscale.select((- 1), 1).expand(size)
return (mu, logscale)
elif (size is not None):
mu = self.mu.expand(size)
logscale = self.logscale.expand(size)
return (mu, logscale)
elif (mu_logscale is not None):
mu = mu_logscale.select((- 1), 0)
logscale = mu_logscale.select((- 1), 1)
return (mu, logscale)
else:
raise ValueError('Given invalid inputs: size={}, mu_logscale={})'.format(size, mu_logscale))
def sample(self, size=None, params=None):
(mu, logscale) = self._check_inputs(size, params)
scale = torch.exp(logscale)
u = (Variable(torch.rand(mu.size()).type_as(mu.data)) - 0.5)
sample = (mu - ((scale * torch.sign(u)) * torch.log(((1 - (2 * torch.abs(u))) + eps))))
return sample
def log_density(self, sample, params=None):
if (params is not None):
(mu, logscale) = self._check_inputs(None, params)
else:
(mu, logscale) = self._check_inputs(sample.size(), None)
mu = mu.type_as(sample)
logscale = logscale.type_as(sample)
c = self.normalization.type_as(sample.data)
inv_scale = torch.exp((- logscale))
ins_exp = ((- torch.abs((sample - mu))) * inv_scale)
return ((ins_exp + c) - logscale)
def get_params(self):
return torch.cat([self.mu, self.logscale])
@property
def nparams(self):
return 2
@property
def ndim(self):
return 1
@property
def is_reparameterizable(self):
return True
def __repr__(self):
tmpstr = (self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(self.mu.data[0], self.logscale.exp().data[0]))
return tmpstr
|
class Bernoulli(nn.Module):
'Samples from a Bernoulli distribution where the probability is given\n by the sigmoid of the given parameter.\n '
def __init__(self, p=0.5, stgradient=False):
super(Bernoulli, self).__init__()
p = torch.Tensor([p])
self.p = Variable(torch.log(((p / (1 - p)) + eps)))
self.stgradient = stgradient
def _check_inputs(self, size, ps):
if ((size is None) and (ps is None)):
raise ValueError('Either one of size or params should be provided.')
elif ((size is not None) and (ps is not None)):
if (ps.ndimension() > len(size)):
return ps.squeeze((- 1)).expand(size)
else:
return ps.expand(size)
elif (size is not None):
return self.p.expand(size)
elif (ps is not None):
return ps
else:
raise ValueError('Given invalid inputs: size={}, ps={})'.format(size, ps))
    def _sample_logistic(self, size):
        u = Variable(torch.rand(size))
        l = (torch.log((u + eps)) - torch.log(((1 - u) + eps)))
        return l
    def sample(self, size=None, params=None):
        presigm_ps = self._check_inputs(size, params)
        logp = F.logsigmoid(presigm_ps)
        logq = F.logsigmoid((- presigm_ps))
        l = self._sample_logistic(logp.size()).type_as(presigm_ps)
        z = ((logp - logq) + l)
        b = STHeaviside.apply(z)
        return (b if self.stgradient else b.detach())
    def log_density(self, sample, params=None):
        presigm_ps = self._check_inputs(sample.size(), params).type_as(sample)
        p = ((F.sigmoid(presigm_ps) + eps) * (1 - (2 * eps)))
        logp = ((sample * torch.log((p + eps))) + ((1 - sample) * torch.log(((1 - p) + eps))))
        return logp
    def get_params(self):
        return self.p
    @property
    def nparams(self):
        return 1
    @property
    def ndim(self):
        return 1
    @property
    def is_reparameterizable(self):
        return self.stgradient
    def __repr__(self):
        tmpstr = (self.__class__.__name__ + ' ({:.3f})'.format(torch.sigmoid(self.p.data)[0]))
        return tmpstr
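# Usage sketch (added for illustration, not part of the original source).
# Sampling thresholds the logits perturbed by logistic noise (the binary
# Gumbel-max trick); with stgradient=True the STHeaviside below lets
# gradients flow straight through the hard 0/1 samples:
bern = Bernoulli(p=0.3, stgradient=True)
b = bern.sample(size=(8, 4))    # hard 0/1 samples
logpb = bern.log_density(b)     # Bernoulli log-likelihood, elementwise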
|
class FactorialNormalizingFlow(nn.Module):
    def __init__(self, dim, nsteps):
        super(FactorialNormalizingFlow, self).__init__()
        self.dim = dim
        self.nsteps = nsteps
        self.x_dist = Normal()
        self.scale = nn.Parameter(torch.Tensor(self.nsteps, self.dim))
        self.weight = nn.Parameter(torch.Tensor(self.nsteps, self.dim))
        self.bias = nn.Parameter(torch.Tensor(self.nsteps, self.dim))
        self.reset_parameters()
    def reset_parameters(self):
        self.scale.data.normal_(0, 0.02)
        self.weight.data.normal_(0, 0.02)
        self.bias.data.normal_(0, 0.02)
    def sample(self, batch_size):
        raise NotImplementedError
    def log_density(self, y, params=None):
        assert (y.size(1) == self.dim)
        x = y
        logdetgrad = Variable(torch.zeros(y.size()).type_as(y.data))
        for i in range(self.nsteps):
            u = self.scale[i][None]
            w = self.weight[i][None]
            b = self.bias[i][None]
            act = F.tanh(((x * w) + b))
            x = (x + (u * act))
            logdetgrad = (logdetgrad + torch.log((torch.abs((1 + ((u * (1 - act.pow(2))) * w))) + eps)))
        logpx = self.x_dist.log_density(x)
        logpy = (logpx + logdetgrad)
        return logpy
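# Usage sketch (added for illustration, not part of the original source).
# Each step applies the elementwise update x <- x + u * tanh(w * x + b), and
# log_density accumulates log|dx/dy| per dimension, so by the change of
# variables (factorized per dimension) log p(y) = log N(x) + logdetgrad:
flow = FactorialNormalizingFlow(dim=10, nsteps=32)
y = Variable(torch.randn(5, 10))
logpy = flow.log_density(y)     # shape (5, 10), elementwise log-density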
|
class STHeaviside(Function):
    @staticmethod
    def forward(ctx, x):
        y = torch.zeros(x.size()).type_as(x)
        y[(x >= 0)] = 1
        return y
    @staticmethod
    def backward(ctx, grad_output):
        return grad_output
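# Usage sketch (added for illustration, not part of the original source).
# Forward is a hard threshold at zero; backward passes gradients through
# unchanged (the straight-through estimator):
z = Variable(torch.randn(3), requires_grad=True)
b = STHeaviside.apply(z)        # values in {0, 1}
b.sum().backward()              # z.grad is all ones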
|
def save_checkpoint(state, save, epoch):
    if (not os.path.exists(save)):
        os.makedirs(save)
    filename = os.path.join(save, ('checkpt-%04d.pth' % epoch))
    torch.save(state, filename)
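# Usage sketch (added for illustration; the save directory is hypothetical,
# and the 'args'/'state_dict' keys mirror what load_model_and_dataset below
# expects):
# save_checkpoint({'args': args, 'state_dict': vae.state_dict()},
#                 save='runs/exp1', epoch=10)   # -> runs/exp1/checkpt-0010.pth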
|
class AverageMeter(object):
    'Computes and stores the average and current value'
    def __init__(self):
        self.reset()
    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        self.val = val
        self.sum += (val * n)
        self.count += n
        self.avg = (self.sum / self.count)
|
class RunningAverageMeter(object):
    'Computes and stores an exponential moving average and the current value'
    def __init__(self, momentum=0.97):
        self.momentum = momentum
        self.reset()
    def reset(self):
        self.val = None
        self.avg = 0
    def update(self, val):
        if (self.val is None):
            self.avg = val
        else:
            self.avg = ((self.avg * self.momentum) + (val * (1 - self.momentum)))
        self.val = val
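# Usage sketch (added for illustration, not part of the original source).
# AverageMeter keeps an exact weighted mean; RunningAverageMeter keeps an
# exponential moving average: avg <- momentum * avg + (1 - momentum) * val.
meter = AverageMeter()
meter.update(2.0, n=3)
meter.update(4.0)               # meter.avg == (2.0 * 3 + 4.0) / 4 == 2.5
ema = RunningAverageMeter(momentum=0.9)
ema.update(1.0)                 # first value initializes avg to 1.0
ema.update(2.0)                 # avg == 0.9 * 1.0 + 0.1 * 2.0 == 1.1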
|
def isnan(tensor):
    return (tensor != tensor)
|
def logsumexp(value, dim=None, keepdim=False):
    'Numerically stable implementation of the operation\n\n value.exp().sum(dim, keepdim).log()\n '
    if (dim is not None):
        (m, _) = torch.max(value, dim=dim, keepdim=True)
        value0 = (value - m)
        if (keepdim is False):
            m = m.squeeze(dim)
        return (m + torch.log(torch.sum(torch.exp(value0), dim=dim, keepdim=keepdim)))
    else:
        m = torch.max(value)
        sum_exp = torch.sum(torch.exp((value - m)))
        if isinstance(sum_exp, Number):
            return (m + math.log(sum_exp))
        else:
            return (m + torch.log(sum_exp))
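# Quick sanity check (added for illustration, not part of the original
# source): subtracting the per-row max before exponentiating avoids the
# overflow that the naive value.exp().sum(dim).log() would hit here.
x = torch.Tensor([[1000.0, 1000.0], [0.0, math.log(3)]])
out = logsumexp(x, dim=1)       # ~[1000.0 + log(2), log(4)], no inf/nan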
|
def load_model_and_dataset(checkpt_filename):
    print('Loading model and dataset.')
    checkpt = torch.load(checkpt_filename, map_location=(lambda storage, loc: storage))
    args = checkpt['args']
    state_dict = checkpt['state_dict']
    if (not hasattr(args, 'conv')):
        args.conv = False
    if ((not hasattr(args, 'dist')) or (args.dist == 'normal')):
        prior_dist = dist.Normal()
        q_dist = dist.Normal()
    elif (args.dist == 'laplace'):
        prior_dist = dist.Laplace()
        q_dist = dist.Laplace()
    elif (args.dist == 'flow'):
        prior_dist = flows.FactorialNormalizingFlow(dim=args.latent_dim, nsteps=32)
        q_dist = dist.Normal()
    else:
        # Fail fast on an unrecognized args.dist instead of leaving
        # prior_dist/q_dist undefined and crashing later.
        raise ValueError('Unknown distribution: {}'.format(args.dist))
    if hasattr(args, 'ncon'):
        model = infogan.Model(args.latent_dim, n_con=args.ncon, n_cat=args.ncat, cat_dim=args.cat_dim, use_cuda=True, conv=args.conv)
        model.load_state_dict(state_dict, strict=False)
        vae = vae_quant.VAE(z_dim=args.ncon, use_cuda=True, prior_dist=prior_dist, q_dist=q_dist, conv=args.conv)
        vae.encoder = model.encoder
        vae.decoder = model.decoder
    else:
        vae = vae_quant.VAE(z_dim=args.latent_dim, use_cuda=True, prior_dist=prior_dist, q_dist=q_dist, conv=args.conv)
        vae.load_state_dict(state_dict, strict=False)
    loader = vae_quant.setup_data_loaders(args)
    return (vae, loader.dataset, args)
|
def get_dataset(name):
    mod = __import__('lisrd.datasets.{}'.format(name), fromlist=[''])
    return getattr(mod, _module_to_class(name))
|
def _module_to_class(name):
    return ''.join((n.capitalize() for n in name.split('_')))
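# Illustration (added, not part of the original source): dataset modules are
# resolved by converting snake_case file names to CamelCase class names, so a
# hypothetical lisrd/datasets/my_dataset.py should define class MyDataset.
assert _module_to_class('my_dataset') == 'MyDataset'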
|
class BaseDataset(metaclass=ABCMeta):
    ' Base dataset class.\n\n Arguments:\n config: A dictionary containing the configuration parameters.\n device: The device to train/test on.\n '
    required_baseconfig = ['batch_size', 'test_batch_size', 'sizes']
    def __init__(self, config, device):
        self._config = config
        self._device = device
        required = (self.required_baseconfig + getattr(self, 'required_config_keys', []))
        for r in required:
            assert (r in self._config), "Required configuration entry: '{}'".format(r)
        seed = self._config.get('seed', 0)
        np.random.seed(seed)
        torch.manual_seed(seed)
    @abstractmethod
    def get_dataset(self, split):
        'To be implemented by the child class.'
        raise NotImplementedError
    def get_data_loader(self, split):
        'Return a data loader for a given split.'
        assert (split in ['train', 'val', 'test'])
        batch_size = (self._config['test_batch_size'] if (split == 'test') else self._config['batch_size'])
        return DataLoader(self.get_dataset(split), batch_size=batch_size, shuffle=(split == 'train'))