| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | import torch |
| | import torch.nn as nn |
| | from tqdm import tqdm |
| |
|
| | |
| | |
| | from model import * |
| |
|
| | |
| | import numpy as np |
| | |
| | |
| | import sys |
| | import os |
| | from sklearn.metrics import explained_variance_score, mean_squared_error |
| |
|
| |
|
| | |
| | |
| | |
| | |
| | |
# Default location of a pretrained checkpoint.
# NOTE(review): model_dir is not referenced anywhere in this file — presumably
# consumed by a sibling script or stale; confirm before removing.
model_dir = './model/semantic_SemanticCNN_model.pth'

# Expected number of command-line arguments for main(): [ODIR] [MDL_PATH] [TEST_PATH].
NUM_ARGS = 3
# File extension for hypothesis (model-predicted velocity) output files.
HYP_EXT = ".hyp"
# File extension for ground-truth velocity output files.
GRT_EXT = ".grt"

# Tokens used when serializing one "vx vy\n" record per sample.
SPACE = " "
NEW_LINE = "\n"
| |
|
| |
|
| |
|
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
def explained_variance(input, target):
    """Return the explained-variance score of predictions against ground truth.

    EV = 1 - Var(target - input) / Var(target)

    A score of 1.0 means the predictions capture all of the variance of the
    ground truth; lower (possibly negative) values mean worse predictions.

    :param input: predicted values (array-like, broadcastable against target)
    :param target: ground-truth values (array-like)
    :return: scalar explained-variance score
    """
    # BUGFIX: the denominator must be the variance of the ground truth
    # (target), not of the predictions. The previous np.var(input) denominator
    # disagrees with sklearn.metrics.explained_variance_score (imported above).
    ev = 1 - np.var(target - input) / np.var(target)
    return ev
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
def main(argv):
    """Evaluate a trained SemanticCNN checkpoint on a test set.

    Writes two parallel text files into the output directory, one line per
    sample: a hypothesis file (predicted vx vy) and a ground-truth file
    (reference vx vy), and prints the average MSE loss over the set.

    :param argv: [ODIR, MDL_PATH, TEST_PATH] — output directory, checkpoint
                 path, and test-set path (handed to NavDataset).
    :return: True on success (exits the process on argument/IO errors)
    """
    # Verify command-line arguments.
    if len(argv) != NUM_ARGS:
        # BUGFIX: the old message advertised "[MDL_PATH] [TRAIN_PATH]
        # [DEV_PATH]", but the script actually consumes output dir,
        # checkpoint path and test path (see assignments below).
        print("usage: python nedc_train_mdl.py [ODIR] [MDL_PATH] [TEST_PATH]")
        sys.exit(-1)

    odir = argv[0]      # directory receiving the .hyp/.grt files
    mdl_path = argv[1]  # checkpoint to load
    pTest = argv[2]     # test-set path handed to NavDataset

    # Create the output directory if needed (race-free vs. check-then-make).
    os.makedirs(odir, exist_ok=True)

    # Derive output file names from the test-set base name (computed once).
    base_name = os.path.splitext(os.path.basename(pTest))[0]
    hyp_name = base_name + HYP_EXT
    grt_name = base_name + GRT_EXT

    # Prefer GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Build the evaluation data pipeline; batch_size=1 so each batch is one
    # sample whose prediction/ground truth is written as one output line.
    eval_dataset = NavDataset(pTest, 'test')
    eval_dataloader = torch.utils.data.DataLoader(
        eval_dataset, batch_size=1, shuffle=False, drop_last=True)

    # Instantiate the network and switch to inference mode.
    model = SemanticCNN(Bottleneck, [2, 1, 1])
    model.to(device)
    model.eval()

    # Sum-reduced MSE; averaged manually over batches below.
    criterion = nn.MSELoss(reduction='sum')
    criterion.to(device)

    # Restore trained weights onto the chosen device.
    checkpoint = torch.load(mdl_path, map_location=device)
    model.load_state_dict(checkpoint['model'])

    # Open the hypothesis and ground-truth output files.
    try:
        ofile = open(os.path.join(odir, hyp_name), 'w+')
        vel_file = open(os.path.join(odir, grt_name), 'w+')
    except IOError as e:
        print(os.path.join(odir, hyp_name))
        print("[%s]: %s" % (hyp_name, e.strerror))
        sys.exit(-1)

    counter = 0
    running_loss = 0
    num_batches = int(len(eval_dataset) / eval_dataloader.batch_size)

    # BUGFIX: close the output files even if evaluation raises mid-loop
    # (previously they leaked on any exception).
    try:
        with torch.no_grad():
            for i, batch in tqdm(enumerate(eval_dataloader), total=num_batches):
                counter += 1

                # Move the sample's tensors onto the compute device.
                scan_maps = batch['scan_map'].to(device)
                semantic_maps = batch['semantic_map'].to(device)
                sub_goals = batch['sub_goal'].to(device)
                velocities = batch['velocity'].to(device)

                # Forward pass: predicted velocity for this sample.
                output = model(scan_maps, semantic_maps, sub_goals)

                loss = criterion(output, velocities)
                # Average per-device losses when running under DataParallel.
                if torch.cuda.device_count() > 1:
                    loss = loss.mean()
                running_loss += loss.item()

                # Write "vx vy" for prediction and ground truth (batch_size=1,
                # so row 0 is the only row).
                pred = output.data.cpu().numpy()
                ofile.write(str(float(pred[0, 0])) +
                            SPACE + str(float(pred[0, 1])) + NEW_LINE)
                vel_file.write(str(float(velocities[0, 0])) +
                               SPACE + str(float(velocities[0, 1])) + NEW_LINE)

        # Report the average loss over all evaluated batches.
        val_loss = running_loss / counter
        print('Validation set: Average loss: {:.4f}'.format(val_loss))
    finally:
        ofile.close()
        vel_file.close()

    return True
| | |
| | |
| |
|
| |
|
| | |
| | |
# Command-line entry point: drop the program name and hand the remaining
# arguments to main().
if __name__ == '__main__':
    main(sys.argv[1:])
| | |
| | |
| |
|