T1any1 committed
Commit 658bee8 · verified · Parent: 333c14e

Upload 46 files

Files changed (46)
  1. checkpoints/release_r2r/ckpt.iter12000.pth +3 -0
  2. checkpoints/release_rxr/ckpt.iter19600.pth +3 -0
  3. connectivity_graphs.pkl +3 -0
  4. datasets/R2R_VLNCE_v1-2_preprocessed/embeddings.json.gz +3 -0
  5. datasets/R2R_VLNCE_v1-2_preprocessed/envdrop/envdrop.json.gz +3 -0
  6. datasets/R2R_VLNCE_v1-2_preprocessed/envdrop/envdrop_gt.json.gz +3 -0
  7. datasets/R2R_VLNCE_v1-2_preprocessed/joint_train_envdrop/joint_train_envdrop.json.gz +3 -0
  8. datasets/R2R_VLNCE_v1-2_preprocessed/joint_train_envdrop/joint_train_envdrop_gt.json.gz +3 -0
  9. datasets/R2R_VLNCE_v1-2_preprocessed/test/test.json.gz +3 -0
  10. datasets/R2R_VLNCE_v1-2_preprocessed/train/train.json.gz +3 -0
  11. datasets/R2R_VLNCE_v1-2_preprocessed/train/train_gt.json.gz +3 -0
  12. datasets/R2R_VLNCE_v1-2_preprocessed/val_seen/val_seen.json.gz +3 -0
  13. datasets/R2R_VLNCE_v1-2_preprocessed/val_seen/val_seen_gt.json.gz +3 -0
  14. datasets/R2R_VLNCE_v1-2_preprocessed/val_unseen/val_unseen.json.gz +3 -0
  15. datasets/R2R_VLNCE_v1-2_preprocessed/val_unseen/val_unseen_gt.json.gz +3 -0
  16. datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/process_instrs_to_bert_idx.py +77 -0
  17. datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/test/test_bertidx.json.gz +3 -0
  18. datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/test/test_raw.json.gz +3 -0
  19. datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/train/train_bertidx.json.gz +3 -0
  20. datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/train/train_raw.json.gz +3 -0
  21. datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/val_seen/val_seen_bertidx.json.gz +3 -0
  22. datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/val_seen/val_seen_raw.json.gz +3 -0
  23. datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/val_unseen/val_unseen_bertidx.json.gz +3 -0
  24. datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/val_unseen/val_unseen_raw.json.gz +3 -0
  25. datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/vocab.txt +0 -0
  26. datasets/RxR_VLNCE_v0_enc_xlmr/test_challenge/test_challenge_guide.json.gz +3 -0
  27. datasets/RxR_VLNCE_v0_enc_xlmr/test_challenge/test_challenge_guide_raw.json.gz +3 -0
  28. datasets/RxR_VLNCE_v0_enc_xlmr/train/train_follower.json.gz +3 -0
  29. datasets/RxR_VLNCE_v0_enc_xlmr/train/train_follower_gt.json.gz +3 -0
  30. datasets/RxR_VLNCE_v0_enc_xlmr/train/train_guide.json.gz +3 -0
  31. datasets/RxR_VLNCE_v0_enc_xlmr/train/train_guide_gt.json.gz +3 -0
  32. datasets/RxR_VLNCE_v0_enc_xlmr/train/train_guide_raw.json.gz +3 -0
  33. datasets/RxR_VLNCE_v0_enc_xlmr/val_seen/val_seen_follower.json.gz +3 -0
  34. datasets/RxR_VLNCE_v0_enc_xlmr/val_seen/val_seen_follower_gt.json.gz +3 -0
  35. datasets/RxR_VLNCE_v0_enc_xlmr/val_seen/val_seen_guide.json.gz +3 -0
  36. datasets/RxR_VLNCE_v0_enc_xlmr/val_seen/val_seen_guide_gt.json.gz +3 -0
  37. datasets/RxR_VLNCE_v0_enc_xlmr/val_seen/val_seen_guide_raw.json.gz +3 -0
  38. datasets/RxR_VLNCE_v0_enc_xlmr/val_unseen/val_unseen_follower.json.gz +3 -0
  39. datasets/RxR_VLNCE_v0_enc_xlmr/val_unseen/val_unseen_follower_gt.json.gz +3 -0
  40. datasets/RxR_VLNCE_v0_enc_xlmr/val_unseen/val_unseen_guide.json.gz +3 -0
  41. datasets/RxR_VLNCE_v0_enc_xlmr/val_unseen/val_unseen_guide_gt.json.gz +3 -0
  42. datasets/RxR_VLNCE_v0_enc_xlmr/val_unseen/val_unseen_guide_raw.json.gz +3 -0
  43. datasets/RxR_VLNCE_v0_enc_xlmr/xlm_tokenize.py +27 -0
  44. ddppo-models/gibson-2plus-resnet50.pth +3 -0
  45. pretrained/ETP/mlm.sap_r2r/ckpts/model_step_82500.pt +3 -0
  46. pretrained/ETP/mlm.sap_rxr/ckpts/model_step_90000.pt +3 -0
checkpoints/release_r2r/ckpt.iter12000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e70d2a1b4a6cfb32158901b7300cbc2be657641f16c9a17f35ca8570470d8b3
+ size 2071802352
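Note: each "ADDED" entry in this commit whose diff body is just version/oid/size lines is a Git LFS pointer, not the payload itself; the actual file is fetched separately by its sha256 content address. After downloading, a payload can be checked against its pointer. A minimal sketch, stdlib-only Python with hypothetical paths (not part of this repo):

import hashlib
import os

def parse_lfs_pointer(pointer_path):
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(' ')
            fields[key] = value
    return {
        'version': fields['version'],
        'oid': fields['oid'].split(':', 1)[1],  # drop the 'sha256:' prefix
        'size': int(fields['size']),
    }

def verify_lfs_object(pointer_path, object_path):
    """Return True if the downloaded file matches the pointer's size and sha256."""
    meta = parse_lfs_pointer(pointer_path)
    if os.path.getsize(object_path) != meta['size']:
        return False
    sha = hashlib.sha256()
    with open(object_path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            sha.update(chunk)
    return sha.hexdigest() == meta['oid']

# e.g. verify_lfs_object('ckpt.iter12000.pth.pointer', 'ckpt.iter12000.pth')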
checkpoints/release_rxr/ckpt.iter19600.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42208b9357bdb77a5e37504c9a2a8c1ab903722de3084968a8308048abb6e327
+ size 4094548784
connectivity_graphs.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:806653531d5a08bf6bbe8a12b7cb555ef82db04d19f4aa630f4afae494f82ba6
+ size 2282592
datasets/R2R_VLNCE_v1-2_preprocessed/embeddings.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2169054a23abed01959607139e7b5ea45383ec209ad5b0244a59f8d9621b09c0
+ size 1036067
datasets/R2R_VLNCE_v1-2_preprocessed/envdrop/envdrop.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2325b17df4a226022fd4f5e975b6d82fe2fb902b7dc5b2f1fef63ae9606fccb3
+ size 20830954
datasets/R2R_VLNCE_v1-2_preprocessed/envdrop/envdrop_gt.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a1690cc1134961dd99705e37be68b221d696cb929db3280c8942381722f5ff3
+ size 138942121
datasets/R2R_VLNCE_v1-2_preprocessed/joint_train_envdrop/joint_train_envdrop.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13eb1f1f91d423a2c560ccb78fe2ecffb884bec8398a5b5db1ef040b0161006f
+ size 22731731
datasets/R2R_VLNCE_v1-2_preprocessed/joint_train_envdrop/joint_train_envdrop_gt.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d40809491a00ef618f495fc32a95d287342f9238cf0371778fceb3b3febfdc3d
+ size 146191379
datasets/R2R_VLNCE_v1-2_preprocessed/test/test.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47ed2aea3b9f7534d084fb6f969f3a69a3a6663d651689c2fdaf0d0ad007ca5b
+ size 627311
datasets/R2R_VLNCE_v1-2_preprocessed/train/train.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f9340166cfaef49885c859f260546a2f81bb4f7d970fd66847fb213178038be1
+ size 1942263
datasets/R2R_VLNCE_v1-2_preprocessed/train/train_gt.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1f380f83e898dc50383f2fae5fbefac1a9c62a766dde1c30b254db47e93b0ac
+ size 9624256
datasets/R2R_VLNCE_v1-2_preprocessed/val_seen/val_seen.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39f1c95a827675f9fe1240b03ac98fe2c7a5903a31253aa13f371a31c07ce915
+ size 181250
datasets/R2R_VLNCE_v1-2_preprocessed/val_seen/val_seen_gt.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c1df3c1f857b5f974192ea60da5d7dc71ccdd61cdfbdcd004258d5c57b2d0d0
+ size 482513
datasets/R2R_VLNCE_v1-2_preprocessed/val_unseen/val_unseen.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b48654aed06f0323340b6a3b3854ca98e049cb77e612b0b4e55f45a77f8e9a27
+ size 354389
datasets/R2R_VLNCE_v1-2_preprocessed/val_unseen/val_unseen_gt.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b9497dc1aa6ab68073976f9678ac42a154b393edd9d0b0450d0ac90841f684c
+ size 1618489
datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/process_instrs_to_bert_idx.py ADDED
@@ -0,0 +1,77 @@
+ import glob
+ import gzip
+ import json
+ import copy
+
+ import sys
+ sys.path.append('/students/u5399302/research/Oscar/Oscar')
+ from transformers.pytorch_transformers import BertTokenizer
+ tokenizer_class = BertTokenizer
+ tokenizer = tokenizer_class.from_pretrained(
+     '/students/u5399302/research/Oscar/pretrained_models/base-no-labels/ep_67_588997',
+     do_lower_case=True
+ )
+ with open('./vocab.txt') as f:
+     bert_vocab = [line.rstrip() for line in f]
+
+
+ def pad_instr_tokens(instr_tokens, maxlength=20):
+     if len(instr_tokens) <= 2:  # assert len(raw_instr_tokens) > 2
+         return None
+     if len(instr_tokens) > maxlength - 2:  # -2 for [CLS] and [SEP]
+         instr_tokens = instr_tokens[:(maxlength - 2)]
+     instr_tokens = ['[CLS]'] + instr_tokens + ['[SEP]']
+     instr_tokens += ['[PAD]'] * (maxlength - len(instr_tokens))
+     assert len(instr_tokens) == maxlength
+     return instr_tokens
+
+ ## ---------------------------------------------------------------------------
+
+ MAX_INPUT = 80
+
+ data_dir = glob.glob('./*/*_raw.json.gz')
+ for in_dir in data_dir:
+
+     with gzip.open(in_dir) as f:
+         data = json.load(f)
+     new_data = copy.deepcopy(data)
+
+     split = in_dir.split('/')[1]
+     print('Working on %s split ...' % (split))
+
+     # revise the vocabulary info
+     new_data['instruction_vocab']['word_list'] = bert_vocab
+     word2idx_dict = {}
+     for i, word in enumerate(bert_vocab):
+         word2idx_dict[word] = i
+     new_data['instruction_vocab']['word2idx_dict'] = word2idx_dict
+     new_data['instruction_vocab']['stoi'] = word2idx_dict
+     new_data['instruction_vocab']['itos'] = bert_vocab
+     new_data['instruction_vocab']['num_vocab'] = len(bert_vocab)
+     new_data['instruction_vocab']['UNK_INDEX'] = 100
+     new_data['instruction_vocab']['PAD_INDEX'] = 0
+
+     # process the instruction for each sample in the data split
+     for i, sample in enumerate(new_data['episodes']):
+         instr_text = sample['instruction']['instruction_text']
+
+         ''' BERT tokenizer '''
+         instr_tokens = tokenizer.tokenize(instr_text)
+         padded_instr_tokens = pad_instr_tokens(instr_tokens, MAX_INPUT)
+         instr_idxes = tokenizer.convert_tokens_to_ids(padded_instr_tokens)
+
+         new_data['episodes'][i]['instruction']['instruction_tokens'] = instr_idxes
+
+     json_str = json.dumps(new_data)
+     json_bytes = json_str.encode('utf-8')
+     with gzip.open('./%s/%s_bertidx.json.gz' % (split, split), 'w') as fout:
+         fout.write(json_bytes)
+
+ print('Done.')
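For reference, a self-contained sketch of the [CLS]/[SEP]/[PAD] padding scheme the script above applies, runnable without the Oscar checkpoint or vocab.txt (the toy tokens below are made up):

def pad_instr_tokens(instr_tokens, maxlength=20):
    # Same scheme as the script: truncate, wrap in [CLS]/[SEP], pad to a fixed length.
    if len(instr_tokens) <= 2:
        return None
    if len(instr_tokens) > maxlength - 2:  # reserve two slots for [CLS] and [SEP]
        instr_tokens = instr_tokens[:maxlength - 2]
    instr_tokens = ['[CLS]'] + instr_tokens + ['[SEP]']
    instr_tokens += ['[PAD]'] * (maxlength - len(instr_tokens))
    return instr_tokens

print(pad_instr_tokens(['walk', 'to', 'the', 'kitchen'], maxlength=8))
# ['[CLS]', 'walk', 'to', 'the', 'kitchen', '[SEP]', '[PAD]', '[PAD]']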
datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/test/test_bertidx.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b74845fcaa16044bb63ff436f67d8b1cdd1b883b821641f925c960bbb0301ff
+ size 960711
datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/test/test_raw.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47ed2aea3b9f7534d084fb6f969f3a69a3a6663d651689c2fdaf0d0ad007ca5b
+ size 627311
datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/train/train_bertidx.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b43db098cbf6bdf05fdccd9992b87a9aee58eef5c74adc62d92cfe636baf2f4d
+ size 2500100
datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/train/train_raw.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f9340166cfaef49885c859f260546a2f81bb4f7d970fd66847fb213178038be1
+ size 1942263
datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/val_seen/val_seen_bertidx.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83b231674ae3a18cb02b3d65847377e978f45cc35682f807c03becc38b09410c
+ size 758273
datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/val_seen/val_seen_raw.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39f1c95a827675f9fe1240b03ac98fe2c7a5903a31253aa13f371a31c07ce915
+ size 181250
datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/val_unseen/val_unseen_bertidx.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7db0a39bef374cf5ce986473f8b35c9721f962c6c1a9af069257aa710df16365
+ size 928029
datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/val_unseen/val_unseen_raw.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b48654aed06f0323340b6a3b3854ca98e049cb77e612b0b4e55f45a77f8e9a27
+ size 354389
datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/vocab.txt ADDED
The diff for this file is too large to render.
 
datasets/RxR_VLNCE_v0_enc_xlmr/test_challenge/test_challenge_guide.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2457d20e2c128d10fb916e54cebdaee912118afac1c04e1e272996a4e4ea1a3
+ size 3545315
datasets/RxR_VLNCE_v0_enc_xlmr/test_challenge/test_challenge_guide_raw.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:904a1e3e718ac87a7f21030d5355ff5af129773026a1e571862cd5a4da8d65e0
+ size 1710705
datasets/RxR_VLNCE_v0_enc_xlmr/train/train_follower.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c329fd107a261381659b79373cf8e42471e19957ef2790382cb7b6676b8657ab
+ size 64003645
datasets/RxR_VLNCE_v0_enc_xlmr/train/train_follower_gt.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49c36f04f5b4a9dca6afa13ae10a67870404b0315654e997e749da03547d7489
+ size 87523962
datasets/RxR_VLNCE_v0_enc_xlmr/train/train_guide.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f06b2ef4dc947ca15d6c4a5a3d629c9212328f4cbdd38a13bed9c5c1fc224a94
+ size 26241241
datasets/RxR_VLNCE_v0_enc_xlmr/train/train_guide_gt.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46373fec825aa09f6e26fb7ec7be372d105ed9b4d01e162bb23a18f928e8229c
+ size 76919902
datasets/RxR_VLNCE_v0_enc_xlmr/train/train_guide_raw.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe127cfe9350123e7ff511c858f866b2d538564bac910da698bfff4ee46be07e
+ size 55231485
datasets/RxR_VLNCE_v0_enc_xlmr/val_seen/val_seen_follower.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43b0f6b8ae4a9c72730002c790e02945b4dabc2a869f839549f80c0290691ebb
+ size 7155404
datasets/RxR_VLNCE_v0_enc_xlmr/val_seen/val_seen_follower_gt.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6e22dcbe03fabad928ed6d47b5ed847ba4131a9fa4bda51034308782659cb02
+ size 9164309
datasets/RxR_VLNCE_v0_enc_xlmr/val_seen/val_seen_guide.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92703d91df6005c22a9d71823fac5deabdbfd619ea878e4265f8d40ed7abbeb7
+ size 2935739
datasets/RxR_VLNCE_v0_enc_xlmr/val_seen/val_seen_guide_gt.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fe5b78762b7efbbdbaf15cd1821e6d710df92bdc17fabfc5a011a7e0b0fa781
+ size 6504149
datasets/RxR_VLNCE_v0_enc_xlmr/val_seen/val_seen_guide_raw.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:081e3fa5c2c4a120640c914b9428de268dea4464f0a246ab471c37783e9ba816
+ size 6156142
datasets/RxR_VLNCE_v0_enc_xlmr/val_unseen/val_unseen_follower.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63cf439ca4b1012f9f00e06f7188f62cb2b21345a92bb9e6e981d9da14ee6566
+ size 11352621
datasets/RxR_VLNCE_v0_enc_xlmr/val_unseen/val_unseen_follower_gt.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efe95c4be13ee93222fc524d651d2e76078a2e8a3c98bf77b304cd702dd3ce56
+ size 15024042
datasets/RxR_VLNCE_v0_enc_xlmr/val_unseen/val_unseen_guide.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a110036736d0d7a3e4e899dbc6f2954a6ad349e80d442854aae7460940414151
+ size 4551924
datasets/RxR_VLNCE_v0_enc_xlmr/val_unseen/val_unseen_guide_gt.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bdfbb624132501bf297eb0ea16a99e38682668ba5e9ce1554ca5cdcde6b65a0d
+ size 13167787
datasets/RxR_VLNCE_v0_enc_xlmr/val_unseen/val_unseen_guide_raw.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93abddce58e8232867a8738daa8107eb82953693a9d5e70779d1eb00386b222a
+ size 9690827
datasets/RxR_VLNCE_v0_enc_xlmr/xlm_tokenize.py ADDED
@@ -0,0 +1,27 @@
+ import glob
+ import gzip
+ import json
+ from tqdm import tqdm
+ from transformers import AutoTokenizer
+
+ tok = AutoTokenizer.from_pretrained('xlm-roberta-base', do_lower_case=True)
+
+ fns = glob.glob('data/datasets/RxR_VLNCE_v0_enc_xlmr/*/*_raw.json.gz')
+ for fn in fns:
+     with gzip.open(fn) as f:
+         data = json.load(f)
+
+     split = fn.split('/')[-2]
+     print('Working on %s split ...' % (split))
+
+     for i, ep in tqdm(enumerate(data['episodes']), total=len(data['episodes'])):
+         instr_dict = ep['instruction']
+         instr_dict.pop('timed_instruction', None)  # drop word-level timings if present
+         tok_output = tok(instr_dict['instruction_text'])
+         instr_dict['instruction_tokens'] = tok_output['input_ids']
+         data['episodes'][i]['instruction'] = instr_dict
+
+     json_str = json.dumps(data)
+     json_bytes = json_str.encode('utf-8')
+     with gzip.open('data/datasets/RxR_VLNCE_v0_enc_xlmr/%s/%s_guide.json.gz' % (split, split), 'w') as fout:
+         fout.write(json_bytes)
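A quick sanity check on the script's output is to read one re-encoded split back and inspect an episode; a minimal sketch (val_seen_guide.json.gz is one of the files in this commit):

import gzip
import json

path = 'data/datasets/RxR_VLNCE_v0_enc_xlmr/val_seen/val_seen_guide.json.gz'
with gzip.open(path) as f:
    data = json.load(f)

ep = data['episodes'][0]
print(ep['instruction']['instruction_text'][:60])
print(ep['instruction']['instruction_tokens'][:10])  # XLM-R input_ids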
ddppo-models/gibson-2plus-resnet50.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6a600277efacf5fd98e293267221185d843eb3012aeff62fabfeee24c2bcdad
+ size 49853716
pretrained/ETP/mlm.sap_r2r/ckpts/model_step_82500.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be8709272e22fe0ffa98c8b2b0275dc5a0e3072f12e566c3994751733cf9f657
+ size 772962767
pretrained/ETP/mlm.sap_rxr/ckpts/model_step_90000.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a9732d02f8b4cf22ebabfa91dcc9c195305d2687312e37a21be22fad49bf12d
+ size 2122331983