CarlosMorales committed on
Commit
643ba24
1 Parent(s): b02bee0

Add new SentenceTransformer model.

Browse files
Files changed (3) hide show
  1. config.json +1 -1
  2. model.safetensors +1 -1
  3. tokenizer_config.json +0 -7
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "../models/BAAI/bge-large-en-v1.5_finetuned",
3
  "architectures": [
4
  "BertModel"
5
  ],
 
1
  {
2
+ "_name_or_path": "BAAI/bge-large-en-v1.5",
3
  "architectures": [
4
  "BertModel"
5
  ],
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d2a312c3fe543a350583ac7e98708e36767c0d6a8e8c86c0eeb9db4e68d1f830
3
  size 1340612432
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75194409ebed3fcedf5e41f51dbc05bf75795588bd2f94676b863c4e97edb10d
3
  size 1340612432
tokenizer_config.json CHANGED
@@ -46,19 +46,12 @@
46
  "do_basic_tokenize": true,
47
  "do_lower_case": true,
48
  "mask_token": "[MASK]",
49
- "max_length": 512,
50
  "model_max_length": 512,
51
  "never_split": null,
52
- "pad_to_multiple_of": null,
53
  "pad_token": "[PAD]",
54
- "pad_token_type_id": 0,
55
- "padding_side": "right",
56
  "sep_token": "[SEP]",
57
- "stride": 0,
58
  "strip_accents": null,
59
  "tokenize_chinese_chars": true,
60
  "tokenizer_class": "BertTokenizer",
61
- "truncation_side": "right",
62
- "truncation_strategy": "longest_first",
63
  "unk_token": "[UNK]"
64
  }
 
46
  "do_basic_tokenize": true,
47
  "do_lower_case": true,
48
  "mask_token": "[MASK]",
 
49
  "model_max_length": 512,
50
  "never_split": null,
 
51
  "pad_token": "[PAD]",
 
 
52
  "sep_token": "[SEP]",
 
53
  "strip_accents": null,
54
  "tokenize_chinese_chars": true,
55
  "tokenizer_class": "BertTokenizer",
 
 
56
  "unk_token": "[UNK]"
57
  }