Upload folder using huggingface_hub
- .gitattributes +1 -0
- 1_Pooling/config.json +7 -0
- README.md +130 -0
- config.json +27 -0
- config_sentence_transformers.json +7 -0
- epoch_losses.json +1 -0
- eval/binary_classification_evaluation_results.csv +10 -0
- model.safetensors +3 -0
- modules.json +14 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +63 -0
- vocab.txt +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+vocab.txt filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json
ADDED
@@ -0,0 +1,7 @@
{
    "word_embedding_dimension": 768,
    "pooling_mode_cls_token": false,
    "pooling_mode_mean_tokens": false,
    "pooling_mode_max_tokens": true,
    "pooling_mode_mean_sqrt_len_tokens": false
}
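The pooling config above enables max pooling only (CLS, mean, and mean-sqrt-len pooling are all disabled). As an illustrative sketch, not part of the uploaded files, this is roughly how an equivalent module stack could be rebuilt with sentence-transformers, using the `dicta-il/dictabert` base named in `config.json` further down; the variable names are placeholders.

```python
from sentence_transformers import SentenceTransformer, models

# Base encoder (dicta-il/dictabert per config.json), 512-token sequences per sentence_bert_config.json.
word_embedding = models.Transformer("dicta-il/dictabert", max_seq_length=512)

# Max pooling over token embeddings, mirroring 1_Pooling/config.json.
pooling = models.Pooling(
    word_embedding.get_word_embedding_dimension(),  # 768
    pooling_mode_cls_token=False,
    pooling_mode_mean_tokens=False,
    pooling_mode_max_tokens=True,
    pooling_mode_mean_sqrt_len_tokens=False,
)

model = SentenceTransformer(modules=[word_embedding, pooling])
print(model)  # should mirror the "Full Model Architecture" section of the README
```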
README.md
ADDED
@@ -0,0 +1,130 @@
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers

---

# {MODEL_NAME}

This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch


# Max pooling - take the max value over time for every dimension.
def max_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    token_embeddings[input_mask_expanded == 0] = -1e9  # Set padding tokens to a large negative value
    return torch.max(token_embeddings, 1)[0]


# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, max pooling.
sentence_embeddings = max_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})

## Training
The model was trained with the parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 1229 with parameters:
```
{'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`sentence_transformers.losses.TripletLoss.TripletLoss` with parameters:
```
{'distance_metric': 'TripletDistanceMetric.EUCLIDEAN', 'triplet_margin': 5}
```

Parameters of the fit() method:
```
{
    "epochs": 3,
    "evaluation_steps": 500,
    "evaluator": "sentence_transformers.evaluation.BinaryClassificationEvaluator.BinaryClassificationEvaluator",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
    "optimizer_params": {
        "lr": 3e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 50,
    "weight_decay": 0.01
}
```

## Full Model Architecture
```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': True, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
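The Training section of the README lists the loss, evaluator, and fit() parameters, but not the training script itself. Below is a minimal, hypothetical sketch of how those settings map onto the sentence-transformers 2.2 training API; the triplets and dev pairs are placeholder data, not the dataset actually used for this model.

```python
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses, models
from sentence_transformers.evaluation import BinaryClassificationEvaluator

# Base encoder + max pooling (see config.json and 1_Pooling/config.json).
word_embedding = models.Transformer("dicta-il/dictabert", max_seq_length=512)
pooling = models.Pooling(word_embedding.get_word_embedding_dimension(),
                         pooling_mode_mean_tokens=False, pooling_mode_max_tokens=True)
model = SentenceTransformer(modules=[word_embedding, pooling])

# Placeholder (anchor, positive, negative) triplets; the real training data is not in this repo.
train_examples = [InputExample(texts=["anchor", "positive", "negative"])]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)

# TripletLoss with Euclidean distance and margin 5, as listed in the model card.
train_loss = losses.TripletLoss(model=model,
                                distance_metric=losses.TripletDistanceMetric.EUCLIDEAN,
                                triplet_margin=5)

# Placeholder labelled sentence pairs for the BinaryClassificationEvaluator.
evaluator = BinaryClassificationEvaluator(sentences1=["a"], sentences2=["b"], labels=[1])

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    evaluator=evaluator,
    epochs=3,
    evaluation_steps=500,
    warmup_steps=50,
    scheduler="WarmupLinear",
    optimizer_params={"lr": 3e-05},
    weight_decay=0.01,
    max_grad_norm=1,
)
```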
config.json
ADDED
@@ -0,0 +1,27 @@
{
  "_name_or_path": "dicta-il/dictabert",
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "newmodern": true,
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.36.2",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 128000
}
config_sentence_transformers.json
ADDED
@@ -0,0 +1,7 @@
{
  "__version__": {
    "sentence_transformers": "2.2.2",
    "transformers": "4.36.2",
    "pytorch": "2.1.2+cu121"
  }
}
epoch_losses.json
ADDED
@@ -0,0 +1 @@
{"log_history": [{"epoch": 0, "loss": 0.7003634975294062}, {"epoch": 1, "loss": 0.16605059529728186}, {"epoch": 2, "loss": 0.043034195051239794}]}
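epoch_losses.json stores the average training loss per epoch. A trivial sketch for reading it, assuming a local clone of this repository:

```python
import json

# Per-epoch training loss as logged in epoch_losses.json (local clone assumed).
with open("epoch_losses.json") as f:
    history = json.load(f)["log_history"]

for entry in history:
    print(f"epoch {entry['epoch']}: loss {entry['loss']:.4f}")
```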
eval/binary_classification_evaluation_results.csv
ADDED
@@ -0,0 +1,10 @@
epoch,steps,cossim_accuracy,cossim_accuracy_threshold,cossim_f1,cossim_precision,cossim_recall,cossim_f1_threshold,cossim_ap,manhattan_accuracy,manhattan_accuracy_threshold,manhattan_f1,manhattan_precision,manhattan_recall,manhattan_f1_threshold,manhattan_ap,euclidean_accuracy,euclidean_accuracy_threshold,euclidean_f1,euclidean_precision,euclidean_recall,euclidean_f1_threshold,euclidean_ap,dot_accuracy,dot_accuracy_threshold,dot_f1,dot_precision,dot_recall,dot_f1_threshold,dot_ap
0,500,0.8988657844990549,0.6572055816650391,0.8972142170989432,0.9032882011605415,0.8912213740458015,0.6572055816650391,0.944560911694943,0.9083175803402647,326.732177734375,0.9068203650336215,0.9129593810444874,0.9007633587786259,326.732177734375,0.9542491234958791,0.9111531190926276,15.141329765319824,0.9087378640776699,0.924901185770751,0.8931297709923665,15.185525894165039,0.9534047230650327,0.8648393194706995,211.80429077148438,0.8679593721144967,0.8407871198568873,0.8969465648854962,211.63311767578125,0.8786261823859223
0,1000,0.9300567107750473,0.6528611779212952,0.9285714285714285,0.939453125,0.9179389312977099,0.6528611779212952,0.9737300976410953,0.941398865784499,345.21875,0.9406130268199234,0.9442307692307692,0.9370229007633588,345.21875,0.9808658333763999,0.9404536862003781,15.921335220336914,0.9390125847047434,0.9528487229862476,0.9255725190839694,16.045866012573242,0.9806521885152959,0.888468809073724,231.92935180664062,0.8888888888888888,0.8773234200743495,0.9007633587786259,231.92935180664062,0.9083308849892293
0,-1,0.9243856332703214,0.6674684882164001,0.9227836034318398,0.9219047619047619,0.9236641221374046,0.6389604806900024,0.972027662686941,0.9395085066162571,335.3438720703125,0.938388625592417,0.9322033898305084,0.9446564885496184,348.75653076171875,0.9811218887601132,0.9404536862003781,16.17607879638672,0.938894277400582,0.9546351084812623,0.9236641221374046,16.17607879638672,0.9802486625861153,0.8818525519848771,235.76617431640625,0.8856358645928636,0.8506151142355008,0.9236641221374046,235.76617431640625,0.8797209764276414
1,500,0.9120982986767486,0.5920199155807495,0.9121813031161473,0.902803738317757,0.9217557251908397,0.5893062353134155,0.9667693463229743,0.9243856332703214,344.0145263671875,0.9233716475095785,0.926923076923077,0.9198473282442748,350.230712890625,0.9749825312208363,0.9243856332703214,17.552833557128906,0.9238095238095239,0.9220532319391636,0.9255725190839694,17.552833557128906,0.9753406657887319,0.8771266540642723,223.262451171875,0.879182156133829,0.8568840579710145,0.9026717557251909,218.62855529785156,0.8911259610239978
1,1000,0.9206049149338374,0.6353754997253418,0.9184466019417477,0.9347826086956522,0.9026717557251909,0.6248494386672974,0.9725106368499086,0.9404536862003781,359.49932861328125,0.9400570884871551,0.937381404174573,0.9427480916030534,360.86175537109375,0.9812235460956586,0.9395085066162571,18.260955810546875,0.9396226415094339,0.9291044776119403,0.950381679389313,18.32391929626465,0.9807002892852764,0.8856332703213611,235.46749877929688,0.888479262672811,0.8591800356506238,0.9198473282442748,221.70233154296875,0.8993586512879576
1,-1,0.9111531190926276,0.6492329239845276,0.9089165867689357,0.9132947976878613,0.9045801526717557,0.6242419481277466,0.968144178075093,0.9319470699432892,375.5537109375,0.9329608938547486,0.9109090909090909,0.9561068702290076,375.5537109375,0.9795218545056252,0.9319470699432892,17.96462631225586,0.9322033898305085,0.9200743494423792,0.9446564885496184,17.96462631225586,0.9786044450498129,0.8742911153119093,229.0291748046875,0.8769657724329325,0.8509874326750448,0.9045801526717557,229.0291748046875,0.8709702767617598
2,500,0.9149338374291115,0.587235152721405,0.9160447761194029,0.8959854014598541,0.9370229007633588,0.5855621695518494,0.9670082558410162,0.9385633270321361,363.227294921875,0.9386213408876298,0.9289719626168225,0.9484732824427481,364.4090576171875,0.9779200733984246,0.9366729678638941,18.57184600830078,0.9375582479030755,0.9162112932604736,0.9599236641221374,18.57184600830078,0.9769772074830768,0.8742911153119093,237.5072479248047,0.8769657724329325,0.8509874326750448,0.9045801526717557,236.1907196044922,0.8710122088055272
2,1000,0.9196597353497165,0.6004956960678101,0.9198868991517437,0.9087523277467412,0.9312977099236641,0.5983362793922424,0.9711544148164128,0.943289224952741,371.24688720703125,0.9435028248587569,0.9312267657992565,0.9561068702290076,371.24688720703125,0.9811158571018329,0.9404536862003781,18.651973724365234,0.9408450704225352,0.9260628465804066,0.9561068702290076,18.651973724365234,0.9802401197414027,0.8790170132325141,250.70159912109375,0.8802946593001842,0.8505338078291815,0.9122137404580153,233.8839874267578,0.8766882017532465
2,-1,0.9215500945179584,0.6059660911560059,0.9219330855018587,0.8985507246376812,0.9465648854961832,0.5740522146224976,0.9718580893477863,0.945179584120983,372.1387023925781,0.9452830188679245,0.9347014925373134,0.9561068702290076,372.1387023925781,0.9815518873468673,0.94234404536862,18.611305236816406,0.9427230046948356,0.9279112754158965,0.9580152671755725,18.611305236816406,0.9807262410767907,0.8818525519848771,247.81253051757812,0.8830935251798561,0.8350340136054422,0.9370229007633588,220.95921325683594,0.8813355917517796
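Each row of the evaluator CSV corresponds to one evaluation checkpoint (steps = -1 marks the end of an epoch) and reports accuracy, F1, precision, recall, decision thresholds, and average precision for cosine, Manhattan, Euclidean, and dot-product similarity. A short sketch for inspecting it, assuming pandas and a local clone of this repository:

```python
import pandas as pd

# Evaluator output from a local clone of this repository.
df = pd.read_csv("eval/binary_classification_evaluation_results.csv")

# Rank checkpoints by Euclidean average precision (the distance used by the training loss).
best = df.sort_values("euclidean_ap", ascending=False).iloc[0]
print(best[["epoch", "steps", "euclidean_accuracy", "euclidean_f1", "euclidean_ap"]])
```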
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6fd958577210a765e760d0cc0afcafe1e4ffd0dfd916e589961a53e480d711ed
size 737403752
modules.json
ADDED
@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
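modules.json tells sentence-transformers to load the Transformer from the repository root and the Pooling module from 1_Pooling/. A quick way to confirm the assembled pipeline, assuming sentence-transformers is installed ({MODEL_NAME} is the same placeholder used in the model card):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('{MODEL_NAME}')

# SentenceTransformer behaves like an nn.Sequential over the modules listed in modules.json:
# index 0 is the Transformer loaded from the repo root, index 1 the Pooling module from 1_Pooling/.
print(model[0])  # Transformer({'max_seq_length': 512, 'do_lower_case': False})
print(model[1])  # Pooling({... 'pooling_mode_max_tokens': True ...})
```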
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 512,
  "do_lower_case": false
}
special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
{
  "cls_token": {
    "content": "[CLS]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "[MASK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "[PAD]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "[SEP]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,63 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "5": {
      "content": "[BLANK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_lower_case": true,
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
vocab.txt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0fb90bfa35244d26f0065d1fcd0b5becc3da3d44d616a7e2aacaf6320b9fa2d0
size 1500244