LysandreJik committed
Commit 7469798
1 Parent(s): 206aa95
config.json CHANGED
@@ -34,7 +34,7 @@
   "positive_weight": 10.0,
   "reset_position_index_per_cell": true,
   "select_one_column": true,
-  "transformers_version": "4.10.0.dev0",
+  "transformers_version": "4.11.0.dev0",
   "type_vocab_sizes": [
     3,
     256,
@@ -49,5 +49,5 @@
   "use_gumbel_for_aggregation": false,
   "use_gumbel_for_cells": false,
   "use_normalized_answer_loss": false,
-  "vocab_size": 99
+  "vocab_size": 30522
 }
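The two config changes bump the recorded transformers version and grow the vocabulary from 99 entries to 30522, the standard BERT uncased WordPiece vocabulary size, matching the vocab.txt added below. A minimal sketch of how the updated fields surface through transformers; the repo path is a placeholder, not part of this commit:

# Minimal sketch; the repo path below is a placeholder, not part of this commit.
from transformers import TapasConfig

config = TapasConfig.from_pretrained("./path/to/this-repo")
print(config.vocab_size)        # 30522 after this commit (was 99)
print(config.type_vocab_sizes)  # [3, 256, ...] as listed in the hunk above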
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e9cb89f18aab06d7a594507c90672d1d6d07bdfa71eb6561b9122ed856481c14
-size 396403
+oid sha256:e3b1f9191fc1c0f87cebcd7ca15f317a05e15cd69ebd0d567fe609b0f3441b9f
+size 8312299
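The weight file is stored via Git LFS, so the diff only shows the new object hash and size; the jump from ~396 KB to ~8.3 MB is consistent with the much larger embedding matrix implied by the vocab_size change above. A minimal sketch (the local filename is an assumption) for verifying a downloaded checkpoint against this pointer:

# Minimal sketch; assumes pytorch_model.bin has been downloaded locally.
import hashlib

EXPECTED_SHA256 = "e3b1f9191fc1c0f87cebcd7ca15f317a05e15cd69ebd0d567fe609b0f3441b9f"
EXPECTED_SIZE = 8312299  # bytes, from the LFS pointer above

with open("pytorch_model.bin", "rb") as f:
    data = f.read()

assert len(data) == EXPECTED_SIZE, f"unexpected size: {len(data)}"
assert hashlib.sha256(data).hexdigest() == EXPECTED_SHA256, "checksum mismatch"
print("pytorch_model.bin matches the LFS pointer")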
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "additional_special_tokens": ["[EMPTY]"]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"do_lower_case": true, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "empty_token": "[EMPTY]", "tokenize_chinese_chars": true, "strip_accents": null, "cell_trim_length": -1, "max_column_id": null, "max_row_id": null, "strip_column_names": false, "update_answer_coordinates": false, "min_question_length": null, "max_question_length": null, "model_max_length": 512, "additional_special_tokens": ["[EMPTY]"], "drop_rows_to_fit": false, "special_tokens_map_file": "/home/lysandre/.cache/huggingface/transformers/b2ee158b53c69f20f21c961e5b2983ee98ab7cd0de03b6965d82a87b9f57dedf.852c05acd4c087ec9774e7ed56aeea5010c13056cc8bc37594b75b172416592c", "tokenizer_file": null, "name_or_path": "google/tapas-base", "tokenizer_class": "TapasTokenizer"}
vocab.txt ADDED
The diff for this file is too large to render.