cfierro committed
Commit d819933
1 Parent(s): 9a16aaf

Add FlanT5 and Llama2 predictions

Files changed (31)
  1. .gitattributes +15 -0
  2. fm_queries_v2/flant5-xxl_0--google-flan-t5-xxl/args.json +9 -0
  3. fm_queries_v2/flant5-xxl_0--google-flan-t5-xxl/predictions.json +3 -0
  4. fm_queries_v2/flant5-xxl_0--google-flan-t5-xxl/raw_predictions.json +0 -0
  5. fm_queries_v2/flant5-xxl_1--google-flan-t5-xxl/args.json +9 -0
  6. fm_queries_v2/flant5-xxl_1--google-flan-t5-xxl/predictions.json +3 -0
  7. fm_queries_v2/flant5-xxl_1--google-flan-t5-xxl/raw_predictions.json +0 -0
  8. fm_queries_v2/flant5-xxl_2--google-flan-t5-xxl/args.json +9 -0
  9. fm_queries_v2/flant5-xxl_2--google-flan-t5-xxl/predictions.json +3 -0
  10. fm_queries_v2/flant5-xxl_2--google-flan-t5-xxl/raw_predictions.json +0 -0
  11. fm_queries_v2/flant5-xxl_3--google-flan-t5-xxl/args.json +9 -0
  12. fm_queries_v2/flant5-xxl_3--google-flan-t5-xxl/predictions.json +3 -0
  13. fm_queries_v2/flant5-xxl_3--google-flan-t5-xxl/raw_predictions.json +0 -0
  14. fm_queries_v2/flant5-xxl_4--google-flan-t5-xxl/args.json +9 -0
  15. fm_queries_v2/flant5-xxl_4--google-flan-t5-xxl/predictions.json +3 -0
  16. fm_queries_v2/flant5-xxl_4--google-flan-t5-xxl/raw_predictions.json +0 -0
  17. fm_queries_v2/llama2_0--meta-llama-Llama-2-7b-hf/args.json +9 -0
  18. fm_queries_v2/llama2_0--meta-llama-Llama-2-7b-hf/predictions.json +3 -0
  19. fm_queries_v2/llama2_0--meta-llama-Llama-2-7b-hf/raw_predictions.json +3 -0
  20. fm_queries_v2/llama2_1--meta-llama-Llama-2-7b-hf/args.json +9 -0
  21. fm_queries_v2/llama2_1--meta-llama-Llama-2-7b-hf/predictions.json +3 -0
  22. fm_queries_v2/llama2_1--meta-llama-Llama-2-7b-hf/raw_predictions.json +3 -0
  23. fm_queries_v2/llama2_2--meta-llama-Llama-2-7b-hf/args.json +9 -0
  24. fm_queries_v2/llama2_2--meta-llama-Llama-2-7b-hf/predictions.json +3 -0
  25. fm_queries_v2/llama2_2--meta-llama-Llama-2-7b-hf/raw_predictions.json +3 -0
  26. fm_queries_v2/llama2_3--meta-llama-Llama-2-7b-hf/args.json +9 -0
  27. fm_queries_v2/llama2_3--meta-llama-Llama-2-7b-hf/predictions.json +3 -0
  28. fm_queries_v2/llama2_3--meta-llama-Llama-2-7b-hf/raw_predictions.json +3 -0
  29. fm_queries_v2/llama2_4--meta-llama-Llama-2-7b-hf/args.json +9 -0
  30. fm_queries_v2/llama2_4--meta-llama-Llama-2-7b-hf/predictions.json +3 -0
  31. fm_queries_v2/llama2_4--meta-llama-Llama-2-7b-hf/raw_predictions.json +3 -0
.gitattributes CHANGED
@@ -123,3 +123,18 @@ fm_queries_v2/alpaca_fmv2_update23Oct_1---projects-nlp-data-constanzam-stanford_
  fm_queries_v2/alpaca_fmv2_update23Oct_1---projects-nlp-data-constanzam-stanford_alpaca-huggingface-ckpts-7B/raw_predictions.json filter=lfs diff=lfs merge=lfs -text
  fm_queries_v2/alpaca_fmv2_update23Oct_2---projects-nlp-data-constanzam-stanford_alpaca-huggingface-ckpts-7B/raw_predictions.json filter=lfs diff=lfs merge=lfs -text
  fm_queries_v2/alpaca_fmv2_update23Oct_4---projects-nlp-data-constanzam-stanford_alpaca-huggingface-ckpts-7B/raw_predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/flant5-xxl_0--google-flan-t5-xxl/predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/flant5-xxl_1--google-flan-t5-xxl/predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/flant5-xxl_2--google-flan-t5-xxl/predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/flant5-xxl_3--google-flan-t5-xxl/predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/flant5-xxl_4--google-flan-t5-xxl/predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/llama2_0--meta-llama-Llama-2-7b-hf/predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/llama2_0--meta-llama-Llama-2-7b-hf/raw_predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/llama2_1--meta-llama-Llama-2-7b-hf/predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/llama2_1--meta-llama-Llama-2-7b-hf/raw_predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/llama2_2--meta-llama-Llama-2-7b-hf/predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/llama2_2--meta-llama-Llama-2-7b-hf/raw_predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/llama2_3--meta-llama-Llama-2-7b-hf/predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/llama2_3--meta-llama-Llama-2-7b-hf/raw_predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/llama2_4--meta-llama-Llama-2-7b-hf/predictions.json filter=lfs diff=lfs merge=lfs -text
+ fm_queries_v2/llama2_4--meta-llama-Llama-2-7b-hf/raw_predictions.json filter=lfs diff=lfs merge=lfs -text
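The lines above register every new predictions file with Git LFS, so the repository stores lightweight pointer files instead of the multi-megabyte JSON payloads. As a hedged illustration (not part of this commit), the following Python sketch checks that the files under fm_queries_v2/ are covered by a filter=lfs rule in .gitattributes; the helper names are made up for this example.

```python
# Hypothetical check (not part of this commit): confirm that the prediction
# files in fm_queries_v2/ are matched by a `filter=lfs` rule in .gitattributes.
# Assumes it is run from the repository root; helper names are illustrative.
from fnmatch import fnmatch
from pathlib import Path


def lfs_patterns(gitattributes: str = ".gitattributes") -> list[str]:
    """Return the path patterns that .gitattributes routes through Git LFS."""
    patterns = []
    for line in Path(gitattributes).read_text().splitlines():
        parts = line.split()
        if len(parts) > 1 and "filter=lfs" in parts[1:]:
            patterns.append(parts[0])
    return patterns


def is_lfs_tracked(path: str, patterns: list[str]) -> bool:
    # The entries added in this commit are literal paths, so fnmatch reduces
    # to an exact comparison, but it also tolerates glob-style rules.
    return any(fnmatch(path, pattern) for pattern in patterns)


if __name__ == "__main__":
    patterns = lfs_patterns()
    for path in sorted(Path("fm_queries_v2").glob("*/predictions.json")):
        status = "LFS" if is_lfs_tracked(str(path), patterns) else "NOT tracked"
        print(f"{path}: {status}")
```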
fm_queries_v2/flant5-xxl_0--google-flan-t5-xxl/args.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "queries_path": "data/fm_queries_31Oct/fm_queries_0.txt",
+ "template": "query_in_response",
+ "instruction": "Complete the fact in as few words as possible",
+ "output_dir": "./outputs/fm_queries",
+ "exp_name": "flant5-xxl_0",
+ "model_name_or_path": "google/flan-t5-xxl",
+ "cache_dir": null
+ }
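The args.json above records the arguments for one FLAN-T5 prediction shard (shard 0 of the fm_queries_31Oct queries). The script that consumed it is not included in this commit, so the sketch below only illustrates, under assumptions, how such a config could drive generation with Hugging Face transformers; run_queries and the prompt construction for the "query_in_response" template are guesses, not the authors' code.

```python
# Hedged sketch only: the generation script behind these outputs is not in
# this commit. It shows how an args.json like the one above *could* drive
# FLAN-T5 generation with Hugging Face transformers.
import json

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


def run_queries(args_path: str) -> list[str]:
    with open(args_path) as f:
        args = json.load(f)

    tokenizer = AutoTokenizer.from_pretrained(
        args["model_name_or_path"], cache_dir=args["cache_dir"]
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        args["model_name_or_path"], cache_dir=args["cache_dir"]
    )

    with open(args["queries_path"]) as f:
        queries = [line.strip() for line in f if line.strip()]

    predictions = []
    for query in queries:
        # Assumed prompt construction; the real "query_in_response" template may differ.
        prompt = f'{args["instruction"]}: {query}'
        inputs = tokenizer(prompt, return_tensors="pt")
        output_ids = model.generate(**inputs, max_new_tokens=32)
        predictions.append(
            tokenizer.decode(output_ids[0], skip_special_tokens=True)
        )
    return predictions
```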
fm_queries_v2/flant5-xxl_0--google-flan-t5-xxl/predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1999f04af0838f0e5e8843b3d2b3d932dcaa3501d6ece4aff415cdb22268202
+ size 13379588
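predictions.json is committed as a Git LFS pointer: the three lines above are the pointer itself (spec version, SHA-256 of the real file, and its size in bytes), not the predictions. After git lfs pull materializes the file, a download can be checked against its pointer; the sketch below parses the standard pointer format and verifies the hash. Paths and function names are illustrative.

```python
# Sketch: parse the Git LFS pointer format shown above (version / oid / size)
# and verify a locally materialized file against it. Names are illustrative.
import hashlib


def parse_lfs_pointer(pointer_path: str) -> dict:
    with open(pointer_path) as f:
        fields = dict(line.split(" ", 1) for line in f.read().splitlines() if line)
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }


def matches_pointer(data_path: str, pointer: dict) -> bool:
    with open(data_path, "rb") as f:
        data = f.read()
    return (
        len(data) == pointer["size"]
        and hashlib.sha256(data).hexdigest() == pointer["sha256"]
    )
```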
fm_queries_v2/flant5-xxl_0--google-flan-t5-xxl/raw_predictions.json ADDED
The diff for this file is too large to render. See raw diff
 
fm_queries_v2/flant5-xxl_1--google-flan-t5-xxl/args.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "queries_path": "data/fm_queries_31Oct/fm_queries_1.txt",
+ "template": "query_in_response",
+ "instruction": "Complete the fact in as few words as possible",
+ "output_dir": "./outputs/fm_queries",
+ "exp_name": "flant5-xxl_1",
+ "model_name_or_path": "google/flan-t5-xxl",
+ "cache_dir": null
+ }
fm_queries_v2/flant5-xxl_1--google-flan-t5-xxl/predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:69f57c55c90aee5d117b1bb72a22434704f7b58155d4b5c49203222cc3e2223a
+ size 13884176
fm_queries_v2/flant5-xxl_1--google-flan-t5-xxl/raw_predictions.json ADDED
The diff for this file is too large to render. See raw diff
 
fm_queries_v2/flant5-xxl_2--google-flan-t5-xxl/args.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "queries_path": "data/fm_queries_31Oct/fm_queries_2.txt",
+ "template": "query_in_response",
+ "instruction": "Complete the fact in as few words as possible",
+ "output_dir": "./outputs/fm_queries",
+ "exp_name": "flant5-xxl_2",
+ "model_name_or_path": "google/flan-t5-xxl",
+ "cache_dir": null
+ }
fm_queries_v2/flant5-xxl_2--google-flan-t5-xxl/predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4a355c5f8cdf0f98eaefa8d35a3caef9ba7814b99335d888137c117d0761e61
+ size 14533166
fm_queries_v2/flant5-xxl_2--google-flan-t5-xxl/raw_predictions.json ADDED
The diff for this file is too large to render. See raw diff
 
fm_queries_v2/flant5-xxl_3--google-flan-t5-xxl/args.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "queries_path": "data/fm_queries_31Oct/fm_queries_3.txt",
+ "template": "query_in_response",
+ "instruction": "Complete the fact in as few words as possible",
+ "output_dir": "./outputs/fm_queries",
+ "exp_name": "flant5-xxl_3",
+ "model_name_or_path": "google/flan-t5-xxl",
+ "cache_dir": null
+ }
fm_queries_v2/flant5-xxl_3--google-flan-t5-xxl/predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d18e42313d1853a9f177aa909cd39b49723ff7a2875c70548d1f1cfbfe878233
+ size 14005624
fm_queries_v2/flant5-xxl_3--google-flan-t5-xxl/raw_predictions.json ADDED
The diff for this file is too large to render. See raw diff
 
fm_queries_v2/flant5-xxl_4--google-flan-t5-xxl/args.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "queries_path": "data/fm_queries_31Oct/fm_queries_4.txt",
+ "template": "query_in_response",
+ "instruction": "Complete the fact in as few words as possible",
+ "output_dir": "./outputs/fm_queries",
+ "exp_name": "flant5-xxl_4",
+ "model_name_or_path": "google/flan-t5-xxl",
+ "cache_dir": null
+ }
fm_queries_v2/flant5-xxl_4--google-flan-t5-xxl/predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9392000ebc644dfaa1470b3a030b9b9e7d853cd3ee18823c38167a9c273361f
+ size 14030404
fm_queries_v2/flant5-xxl_4--google-flan-t5-xxl/raw_predictions.json ADDED
The diff for this file is too large to render. See raw diff
 
fm_queries_v2/llama2_0--meta-llama-Llama-2-7b-hf/args.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "queries_path": "data/fm_queries_31Oct/fm_queries_0.txt",
+ "template": "query_in_response",
+ "instruction": "Complete the fact in as few words as possible",
+ "output_dir": "./outputs/fm_queries",
+ "exp_name": "llama2_0",
+ "model_name_or_path": "meta-llama/Llama-2-7b-hf",
+ "cache_dir": "/projects/nlp/data/pmh864/checkpoints/backbones/huggingface/"
+ }
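The Llama-2 shards reuse the same arguments as the FLAN-T5 ones, differing only in exp_name, the decoder-only checkpoint, and a cluster-local cache_dir. If the FLAN-T5 sketch above were adapted for this config, the model class and prompt handling would change as below; again a hedged illustration, since the actual generation script is not part of this commit.

```python
# Hypothetical adaptation of the FLAN-T5 sketch for the decoder-only
# Llama-2 checkpoint named in this args.json; not the authors' script.
from transformers import AutoModelForCausalLM, AutoTokenizer


def generate_completion(
    prompt: str,
    model_name: str = "meta-llama/Llama-2-7b-hf",
    cache_dir: str | None = None,  # the config points this at a cluster-local HF cache
) -> str:
    tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
    model = AutoModelForCausalLM.from_pretrained(model_name, cache_dir=cache_dir)
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=32)
    # A causal LM echoes the prompt, so keep only the newly generated tokens.
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
```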
fm_queries_v2/llama2_0--meta-llama-Llama-2-7b-hf/predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5c4649ce639b3e48c519ba91b3d2f995c669a766198d1b41e2a52c669b3c67a
+ size 69246830
fm_queries_v2/llama2_0--meta-llama-Llama-2-7b-hf/raw_predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7da52684ccced6e8a754667f13502c761ab09e64886f7db81f0b232e519b5a3a
+ size 34926906
fm_queries_v2/llama2_1--meta-llama-Llama-2-7b-hf/args.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "queries_path": "data/fm_queries_31Oct/fm_queries_1.txt",
+ "template": "query_in_response",
+ "instruction": "Complete the fact in as few words as possible",
+ "output_dir": "./outputs/fm_queries",
+ "exp_name": "llama2_1",
+ "model_name_or_path": "meta-llama/Llama-2-7b-hf",
+ "cache_dir": "/projects/nlp/data/pmh864/checkpoints/backbones/huggingface/"
+ }
fm_queries_v2/llama2_1--meta-llama-Llama-2-7b-hf/predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ca206e955609104c7bb401873424f9661499b5fd5d2d89d1768e52b9b5d8264
+ size 69398276
fm_queries_v2/llama2_1--meta-llama-Llama-2-7b-hf/raw_predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78f7f23be84fd5d4b5e68baa44a460414efbe770571fd8df95419ab78951e59e
+ size 35232879
fm_queries_v2/llama2_2--meta-llama-Llama-2-7b-hf/args.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "queries_path": "data/fm_queries_31Oct/fm_queries_2.txt",
+ "template": "query_in_response",
+ "instruction": "Complete the fact in as few words as possible",
+ "output_dir": "./outputs/fm_queries",
+ "exp_name": "llama2_2",
+ "model_name_or_path": "meta-llama/Llama-2-7b-hf",
+ "cache_dir": "/projects/nlp/data/pmh864/checkpoints/backbones/huggingface/"
+ }
fm_queries_v2/llama2_2--meta-llama-Llama-2-7b-hf/predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e39c51eabdce2d1a12bd88010ccbf6e1b1ad2590592f53e15a24b298285196fb
+ size 69715488
fm_queries_v2/llama2_2--meta-llama-Llama-2-7b-hf/raw_predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b45e2059b3f1866dc239027c0dc6742eb7d78370fcfaf5d8c2833db0d0956576
+ size 35678899
fm_queries_v2/llama2_3--meta-llama-Llama-2-7b-hf/args.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "queries_path": "data/fm_queries_31Oct/fm_queries_3.txt",
+ "template": "query_in_response",
+ "instruction": "Complete the fact in as few words as possible",
+ "output_dir": "./outputs/fm_queries",
+ "exp_name": "llama2_3",
+ "model_name_or_path": "meta-llama/Llama-2-7b-hf",
+ "cache_dir": "/projects/nlp/data/pmh864/checkpoints/backbones/huggingface/"
+ }
fm_queries_v2/llama2_3--meta-llama-Llama-2-7b-hf/predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12ae50ccc53c32728fb38f2714f9b2880219deb350b406dba9cc20465232b822
+ size 69870126
fm_queries_v2/llama2_3--meta-llama-Llama-2-7b-hf/raw_predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aaedccb68e6d2609554fa720d787932e47a91a00b31f39b638afce3e963aaaba
+ size 36060977
fm_queries_v2/llama2_4--meta-llama-Llama-2-7b-hf/args.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "queries_path": "data/fm_queries_31Oct/fm_queries_4.txt",
+ "template": "query_in_response",
+ "instruction": "Complete the fact in as few words as possible",
+ "output_dir": "./outputs/fm_queries",
+ "exp_name": "llama2_4",
+ "model_name_or_path": "meta-llama/Llama-2-7b-hf",
+ "cache_dir": "/projects/nlp/data/pmh864/checkpoints/backbones/huggingface/"
+ }
fm_queries_v2/llama2_4--meta-llama-Llama-2-7b-hf/predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47a238e825da94d07d48dce8e8ed0483a01a8cf60c2ac6c89efb2c823f5b3165
+ size 69774203
fm_queries_v2/llama2_4--meta-llama-Llama-2-7b-hf/raw_predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d3e5e9874bcd340301c6d4e6f1a5515554e5644516dd557f1ec2ac68cd66f47
+ size 35635895