modelId | lastModified | tags | pipeline_tag | files | publishedBy | downloads_last_month | library | modelCard |
---|---|---|---|---|---|---|---|---|
zanderbush/ForceWords2 | 2021-05-23T13:57:08.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
] | text-generation | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"log_history.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
] | zanderbush | 14 | transformers | |
zanderbush/ForceWordsArvix | 2021-05-23T13:59:12.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
] | text-generation | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"log_history.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
] | zanderbush | 17 | transformers | |
zanderbush/ForceWordsT5 | 2020-12-07T02:59:10.000Z | [
"pytorch",
"t5",
"lm-head",
"seq2seq",
"transformers",
"text2text-generation"
] | text2text-generation | [
".gitattributes",
"config.json",
"pytorch_model.bin"
] | zanderbush | 10 | transformers | |
zanderbush/GPTTitle | 2021-05-23T14:00:00.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
] | text-generation | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"vocab.json"
] | zanderbush | 12 | transformers | |
zanderbush/Intellectual | 2021-05-23T14:01:00.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
] | text-generation | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
] | zanderbush | 16 | transformers | |
zanderbush/Paraphrase | 2021-05-23T14:02:13.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
] | text-generation | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
] | zanderbush | 509 | transformers | {Input} -> {Output}
from transformers import pipeline
generator = pipeline('text-generation', model='zanderbush/Paraphrase')
generator("New York is home to the New York Knicks. ->", num_return_sequences=5)
Note: T5 is likely better suited for paraphrasing. That said, this model will afford you mediocre results.
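A hedged sketch of how the `{Input} -> {Output}` convention above could be post-processed (the `do_sample` flag and the splitting logic are assumptions, not part of the original card):
```python
from transformers import pipeline

generator = pipeline('text-generation', model='zanderbush/Paraphrase')
# do_sample=True is assumed here so that multiple distinct sequences can be returned.
outputs = generator("New York is home to the New York Knicks. ->",
                    num_return_sequences=5, do_sample=True)

# Keep only the text generated after the "->" delimiter (assumed output convention).
paraphrases = [o['generated_text'].split('->', 1)[1].strip() for o in outputs]
print(paraphrases)
```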
|
zanderbush/T5ForceWords | 2021-01-11T22:59:04.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
] | text2text-generation | [
".gitattributes",
"config.json",
"pytorch_model.bin"
] | zanderbush | 6 | transformers | |
zanderbush/T6 | 2021-02-15T21:03:57.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
] | text2text-generation | [
".gitattributes",
"config.json",
"pytorch_model.bin"
] | zanderbush | 8 | transformers | |
zanderbush/T7 | 2021-02-22T03:37:18.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
] | text2text-generation | [
".gitattributes",
"config.json",
"pytorch_model.bin"
] | zanderbush | 8 | transformers | |
zanderbush/TryTokenizer | 2021-05-23T14:02:44.000Z | [
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
] | text-generation | [
".gitattributes",
"config.json",
"merges.txt",
"tokenizer_config.json",
"vocab.json"
] | zanderbush | 8 | transformers | |
zanderbush/VBG_GPT2 | 2021-05-23T14:03:30.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
] | text-generation | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin"
] | zanderbush | 8 | transformers | |
zanderbush/VBG_T5 | 2021-02-09T02:55:58.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
] | text2text-generation | [
".gitattributes",
"config.json",
"pytorch_model.bin"
] | zanderbush | 8 | transformers | |
zanelim/singbert-large-sg | 2021-05-20T09:36:17.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"pretraining",
"en",
"dataset:reddit singapore, malaysia",
"dataset:hardwarezone",
"transformers",
"singapore",
"sg",
"singlish",
"malaysia",
"ms",
"manglish",
"bert-large-uncased",
"license:mit"
] | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
] | zanelim | 338 | transformers | ---
language: en
tags:
- singapore
- sg
- singlish
- malaysia
- ms
- manglish
- bert-large-uncased
license: mit
datasets:
- reddit singapore, malaysia
- hardwarezone
widget:
- text: "kopi c siew [MASK]"
- text: "die [MASK] must try"
---
# SingBert Large
SingBert Large - Bert for Singlish (SG) and Manglish (MY).
## Model description
Similar to [SingBert](https://huggingface.co/zanelim/singbert), but the large version: it was initialized from [BERT large uncased (whole word masking)](https://github.com/google-research/bert#pre-trained-models) and its pre-training was further finetuned on
[singlish](https://en.wikipedia.org/wiki/Singlish) and [manglish](https://en.wikipedia.org/wiki/Manglish) data.
## Intended uses & limitations
#### How to use
```python
>>> from transformers import pipeline
>>> nlp = pipeline('fill-mask', model='zanelim/singbert-large-sg')
>>> nlp("kopi c siew [MASK]")
[{'sequence': '[CLS] kopi c siew dai [SEP]',
'score': 0.9003700017929077,
'token': 18765,
'token_str': 'dai'},
{'sequence': '[CLS] kopi c siew mai [SEP]',
'score': 0.0779474675655365,
'token': 14736,
'token_str': 'mai'},
{'sequence': '[CLS] kopi c siew. [SEP]',
'score': 0.0032227332703769207,
'token': 1012,
'token_str': '.'},
{'sequence': '[CLS] kopi c siew bao [SEP]',
'score': 0.0017727474914863706,
'token': 25945,
'token_str': 'bao'},
{'sequence': '[CLS] kopi c siew peng [SEP]',
'score': 0.0012526646023616195,
'token': 26473,
'token_str': 'peng'}]
>>> nlp("one teh c siew dai, and one kopi [MASK]")
[{'sequence': '[CLS] one teh c siew dai, and one kopi. [SEP]',
'score': 0.5249741077423096,
'token': 1012,
'token_str': '.'},
{'sequence': '[CLS] one teh c siew dai, and one kopi o [SEP]',
'score': 0.27349168062210083,
'token': 1051,
'token_str': 'o'},
{'sequence': '[CLS] one teh c siew dai, and one kopi peng [SEP]',
'score': 0.057190295308828354,
'token': 26473,
'token_str': 'peng'},
{'sequence': '[CLS] one teh c siew dai, and one kopi c [SEP]',
'score': 0.04022320732474327,
'token': 1039,
'token_str': 'c'},
{'sequence': '[CLS] one teh c siew dai, and one kopi? [SEP]',
'score': 0.01191170234233141,
'token': 1029,
'token_str': '?'}]
>>> nlp("die [MASK] must try")
[{'sequence': '[CLS] die die must try [SEP]',
'score': 0.9921030402183533,
'token': 3280,
'token_str': 'die'},
{'sequence': '[CLS] die also must try [SEP]',
'score': 0.004993876442313194,
'token': 2036,
'token_str': 'also'},
{'sequence': '[CLS] die liao must try [SEP]',
'score': 0.000317625846946612,
'token': 727,
'token_str': 'liao'},
{'sequence': '[CLS] die still must try [SEP]',
'score': 0.0002260878391098231,
'token': 2145,
'token_str': 'still'},
{'sequence': '[CLS] die i must try [SEP]',
'score': 0.00016935862367972732,
'token': 1045,
'token_str': 'i'}]
>>> nlp("dont play [MASK] leh")
[{'sequence': '[CLS] dont play play leh [SEP]',
'score': 0.9079819321632385,
'token': 2377,
'token_str': 'play'},
{'sequence': '[CLS] dont play punk leh [SEP]',
'score': 0.006846973206847906,
'token': 7196,
'token_str': 'punk'},
{'sequence': '[CLS] dont play games leh [SEP]',
'score': 0.004041737411171198,
'token': 2399,
'token_str': 'games'},
{'sequence': '[CLS] dont play politics leh [SEP]',
'score': 0.003728888463228941,
'token': 4331,
'token_str': 'politics'},
{'sequence': '[CLS] dont play cheat leh [SEP]',
'score': 0.0032805048394948244,
'token': 21910,
'token_str': 'cheat'}]
>>> nlp("confirm plus [MASK]")
[{'sequence': '[CLS] confirm plus chop [SEP]',
'score': 0.9749826192855835,
'token': 24494,
'token_str': 'chop'},
{'sequence': '[CLS] confirm plus chopped [SEP]',
'score': 0.017554156482219696,
'token': 24881,
'token_str': 'chopped'},
{'sequence': '[CLS] confirm plus minus [SEP]',
'score': 0.002725469646975398,
'token': 15718,
'token_str': 'minus'},
{'sequence': '[CLS] confirm plus guarantee [SEP]',
'score': 0.000900257145985961,
'token': 11302,
'token_str': 'guarantee'},
{'sequence': '[CLS] confirm plus one [SEP]',
'score': 0.0004384620988275856,
'token': 2028,
'token_str': 'one'}]
>>> nlp("catch no [MASK]")
[{'sequence': '[CLS] catch no ball [SEP]',
'score': 0.9381157159805298,
'token': 3608,
'token_str': 'ball'},
{'sequence': '[CLS] catch no balls [SEP]',
'score': 0.060842301696538925,
'token': 7395,
'token_str': 'balls'},
{'sequence': '[CLS] catch no fish [SEP]',
'score': 0.00030917322146706283,
'token': 3869,
'token_str': 'fish'},
{'sequence': '[CLS] catch no breath [SEP]',
'score': 7.552534952992573e-05,
'token': 3052,
'token_str': 'breath'},
{'sequence': '[CLS] catch no tail [SEP]',
'score': 4.208395694149658e-05,
'token': 5725,
'token_str': 'tail'}]
```
Here is how to use this model to get the features of a given text in PyTorch:
```python
from transformers import BertTokenizer, BertModel
tokenizer = BertTokenizer.from_pretrained('zanelim/singbert-large-sg')
model = BertModel.from_pretrained("zanelim/singbert-large-sg")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
```
and in TensorFlow:
```python
from transformers import BertTokenizer, TFBertModel
tokenizer = BertTokenizer.from_pretrained("zanelim/singbert-large-sg")
model = TFBertModel.from_pretrained("zanelim/singbert-large-sg")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='tf')
output = model(encoded_input)
```
#### Limitations and bias
This model was finetuned on a colloquial Singlish and Manglish corpus, so it is best applied to downstream tasks involving its main
constituent languages: English, Mandarin, and Malay. Also, because the training data comes mainly from forums, be aware of the inherent biases it may carry.
## Training data
A corpus of colloquial Singlish and Manglish (both are mixtures of English, Mandarin, Tamil, Malay, and other local dialects such as Hokkien, Cantonese, or Teochew).
The corpus was collected from the subreddits `r/singapore` and `r/malaysia`, and from forums such as `hardwarezone`.
## Training procedure
Initialized with the [bert large uncased (whole word masking)](https://github.com/google-research/bert#pre-trained-models) vocab and checkpoints (pre-trained weights).
The top 1,000 custom vocab tokens (not overlapping with the original BERT vocab) were then extracted from the training data and filled into the unused token slots of the original BERT vocab, roughly as sketched below.
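For illustration, a minimal sketch of how such unused-slot filling might be done on a WordPiece `vocab.txt` (the file path and example tokens here are hypothetical, not the authors' actual procedure):
```python
# Hypothetical sketch: overwrite [unusedN] slots in a BERT vocab.txt with custom tokens.
custom_tokens = ["kopi", "siew", "liao"]  # e.g. frequent tokens mined from the Singlish corpus

with open("vocab.txt", encoding="utf-8") as f:
    vocab = [line.rstrip("\n") for line in f]

replacements = iter(custom_tokens)
for i, token in enumerate(vocab):
    if token.startswith("[unused"):
        try:
            vocab[i] = next(replacements)  # fill one unused slot per custom token
        except StopIteration:
            break

with open("vocab.txt", "w", encoding="utf-8") as f:
    f.write("\n".join(vocab) + "\n")
```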
Pre-training was then continued on the training data with the following hyperparameters:
* train_batch_size: 512
* max_seq_length: 128
* num_train_steps: 300000
* num_warmup_steps: 5000
* learning_rate: 2e-5
* hardware: TPU v3-8
|
|
zanelim/singbert-lite-sg | 2020-12-11T22:05:08.000Z | [
"pytorch",
"tf",
"albert",
"pretraining",
"en",
"dataset:reddit singapore, malaysia",
"dataset:hardwarezone",
"transformers",
"singapore",
"sg",
"singlish",
"malaysia",
"ms",
"manglish",
"albert-base-v2",
"license:mit"
] | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tf_model.h5",
"tokenizer_config.json"
] | zanelim | 105 | transformers | ---
language: en
tags:
- singapore
- sg
- singlish
- malaysia
- ms
- manglish
- albert-base-v2
license: mit
datasets:
- reddit singapore, malaysia
- hardwarezone
widget:
- text: "dont play [MASK] leh"
- text: "die [MASK] must try"
---
# SingBert Lite
SingBert Lite - Bert for Singlish (SG) and Manglish (MY).
## Model description
Similar to [SingBert](https://huggingface.co/zanelim/singbert), but the lite version: it was initialized from [Albert base v2](https://github.com/google-research/albert#albert) and its pre-training was further finetuned on
[singlish](https://en.wikipedia.org/wiki/Singlish) and [manglish](https://en.wikipedia.org/wiki/Manglish) data.
## Intended uses & limitations
#### How to use
```python
>>> from transformers import pipeline
>>> nlp = pipeline('fill-mask', model='zanelim/singbert-lite-sg')
>>> nlp("die [MASK] must try")
[{'sequence': '[CLS] die die must try[SEP]',
'score': 0.7731555700302124,
'token': 1327,
'token_str': '▁die'},
{'sequence': '[CLS] die also must try[SEP]',
'score': 0.04763784259557724,
'token': 67,
'token_str': '▁also'},
{'sequence': '[CLS] die still must try[SEP]',
'score': 0.01859409362077713,
'token': 174,
'token_str': '▁still'},
{'sequence': '[CLS] die u must try[SEP]',
'score': 0.015824034810066223,
'token': 287,
'token_str': '▁u'},
{'sequence': '[CLS] die is must try[SEP]',
'score': 0.011271446943283081,
'token': 25,
'token_str': '▁is'}]
>>> nlp("dont play [MASK] leh")
[{'sequence': '[CLS] dont play play leh[SEP]',
'score': 0.4365769624710083,
'token': 418,
'token_str': '▁play'},
{'sequence': '[CLS] dont play punk leh[SEP]',
'score': 0.06880936771631241,
'token': 6769,
'token_str': '▁punk'},
{'sequence': '[CLS] dont play game leh[SEP]',
'score': 0.051739856600761414,
'token': 250,
'token_str': '▁game'},
{'sequence': '[CLS] dont play games leh[SEP]',
'score': 0.045703962445259094,
'token': 466,
'token_str': '▁games'},
{'sequence': '[CLS] dont play around leh[SEP]',
'score': 0.013458190485835075,
'token': 140,
'token_str': '▁around'}]
>>> nlp("catch no [MASK]")
[{'sequence': '[CLS] catch no ball[SEP]',
'score': 0.6197211146354675,
'token': 1592,
'token_str': '▁ball'},
{'sequence': '[CLS] catch no balls[SEP]',
'score': 0.08441998809576035,
'token': 7152,
'token_str': '▁balls'},
{'sequence': '[CLS] catch no joke[SEP]',
'score': 0.0676785409450531,
'token': 8186,
'token_str': '▁joke'},
{'sequence': '[CLS] catch no?[SEP]',
'score': 0.040638409554958344,
'token': 60,
'token_str': '?'},
{'sequence': '[CLS] catch no one[SEP]',
'score': 0.03546864539384842,
'token': 53,
'token_str': '▁one'}]
>>> nlp("confirm plus [MASK]")
[{'sequence': '[CLS] confirm plus chop[SEP]',
'score': 0.9608421921730042,
'token': 17144,
'token_str': '▁chop'},
{'sequence': '[CLS] confirm plus guarantee[SEP]',
'score': 0.011784233152866364,
'token': 9120,
'token_str': '▁guarantee'},
{'sequence': '[CLS] confirm plus confirm[SEP]',
'score': 0.010571340098977089,
'token': 10265,
'token_str': '▁confirm'},
{'sequence': '[CLS] confirm plus egg[SEP]',
'score': 0.0033525123726576567,
'token': 6387,
'token_str': '▁egg'},
{'sequence': '[CLS] confirm plus bet[SEP]',
'score': 0.0008760977652855217,
'token': 5676,
'token_str': '▁bet'}]
```
Here is how to use this model to get the features of a given text in PyTorch:
```python
from transformers import AlbertTokenizer, AlbertModel
tokenizer = AlbertTokenizer.from_pretrained('zanelim/singbert-lite-sg')
model = AlbertModel.from_pretrained("zanelim/singbert-lite-sg")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
```
and in TensorFlow:
```python
from transformers import AlbertTokenizer, TFAlbertModel
tokenizer = AlbertTokenizer.from_pretrained("zanelim/singbert-lite-sg")
model = TFAlbertModel.from_pretrained("zanelim/singbert-lite-sg")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='tf')
output = model(encoded_input)
```
#### Limitations and bias
This model was finetuned on a colloquial Singlish and Manglish corpus, so it is best applied to downstream tasks involving its main
constituent languages: English, Mandarin, and Malay. Also, because the training data comes mainly from forums, be aware of the inherent biases it may carry.
## Training data
A corpus of colloquial Singlish and Manglish (both are mixtures of English, Mandarin, Tamil, Malay, and other local dialects such as Hokkien, Cantonese, or Teochew).
The corpus was collected from the subreddits `r/singapore` and `r/malaysia`, and from forums such as `hardwarezone`.
## Training procedure
Initialized with the [albert base v2](https://github.com/google-research/albert#albert) vocab and checkpoints (pre-trained weights).
Pre-training was then continued on the training data with the following hyperparameters:
* train_batch_size: 4096
* max_seq_length: 128
* num_train_steps: 125000
* num_warmup_steps: 5000
* learning_rate: 0.00176
* hardware: TPU v3-8
|
|
zanelim/singbert | 2021-05-20T09:38:41.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"pretraining",
"en",
"dataset:reddit singapore, malaysia",
"dataset:hardwarezone",
"transformers",
"singapore",
"sg",
"singlish",
"malaysia",
"ms",
"manglish",
"bert-base-uncased",
"license:mit"
] | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
] | zanelim | 218 | transformers | ---
language: en
tags:
- singapore
- sg
- singlish
- malaysia
- ms
- manglish
- bert-base-uncased
license: mit
datasets:
- reddit singapore, malaysia
- hardwarezone
widget:
- text: "kopi c siew [MASK]"
- text: "die [MASK] must try"
---
# SingBert
SingBert - Bert for Singlish (SG) and Manglish (MY).
## Model description
[BERT base uncased](https://github.com/google-research/bert#pre-trained-models), with its pre-training further finetuned on
[singlish](https://en.wikipedia.org/wiki/Singlish) and [manglish](https://en.wikipedia.org/wiki/Manglish) data.
## Intended uses & limitations
#### How to use
```python
>>> from transformers import pipeline
>>> nlp = pipeline('fill-mask', model='zanelim/singbert')
>>> nlp("kopi c siew [MASK]")
[{'sequence': '[CLS] kopi c siew dai [SEP]',
'score': 0.5092713236808777,
'token': 18765,
'token_str': 'dai'},
{'sequence': '[CLS] kopi c siew mai [SEP]',
'score': 0.3515934646129608,
'token': 14736,
'token_str': 'mai'},
{'sequence': '[CLS] kopi c siew bao [SEP]',
'score': 0.05576375499367714,
'token': 25945,
'token_str': 'bao'},
{'sequence': '[CLS] kopi c siew. [SEP]',
'score': 0.006019321270287037,
'token': 1012,
'token_str': '.'},
{'sequence': '[CLS] kopi c siew sai [SEP]',
'score': 0.0038361591286957264,
'token': 18952,
'token_str': 'sai'}]
>>> nlp("one teh c siew dai, and one kopi [MASK].")
[{'sequence': '[CLS] one teh c siew dai, and one kopi c [SEP]',
'score': 0.6176503300666809,
'token': 1039,
'token_str': 'c'},
{'sequence': '[CLS] one teh c siew dai, and one kopi o [SEP]',
'score': 0.21094971895217896,
'token': 1051,
'token_str': 'o'},
{'sequence': '[CLS] one teh c siew dai, and one kopi. [SEP]',
'score': 0.13027705252170563,
'token': 1012,
'token_str': '.'},
{'sequence': '[CLS] one teh c siew dai, and one kopi! [SEP]',
'score': 0.004680239595472813,
'token': 999,
'token_str': '!'},
{'sequence': '[CLS] one teh c siew dai, and one kopi w [SEP]',
'score': 0.002034128177911043,
'token': 1059,
'token_str': 'w'}]
>>> nlp("dont play [MASK] leh")
[{'sequence': '[CLS] dont play play leh [SEP]',
'score': 0.9281464219093323,
'token': 2377,
'token_str': 'play'},
{'sequence': '[CLS] dont play politics leh [SEP]',
'score': 0.010990909300744534,
'token': 4331,
'token_str': 'politics'},
{'sequence': '[CLS] dont play punk leh [SEP]',
'score': 0.005583590362221003,
'token': 7196,
'token_str': 'punk'},
{'sequence': '[CLS] dont play dirty leh [SEP]',
'score': 0.0025784350000321865,
'token': 6530,
'token_str': 'dirty'},
{'sequence': '[CLS] dont play cheat leh [SEP]',
'score': 0.0025066907983273268,
'token': 21910,
'token_str': 'cheat'}]
>>> nlp("catch no [MASK]")
[{'sequence': '[CLS] catch no ball [SEP]',
'score': 0.7922210693359375,
'token': 3608,
'token_str': 'ball'},
{'sequence': '[CLS] catch no balls [SEP]',
'score': 0.20503675937652588,
'token': 7395,
'token_str': 'balls'},
{'sequence': '[CLS] catch no tail [SEP]',
'score': 0.0006608376861549914,
'token': 5725,
'token_str': 'tail'},
{'sequence': '[CLS] catch no talent [SEP]',
'score': 0.0002158183924620971,
'token': 5848,
'token_str': 'talent'},
{'sequence': '[CLS] catch no prisoners [SEP]',
'score': 5.3481446229852736e-05,
'token': 5895,
'token_str': 'prisoners'}]
>>> nlp("confirm plus [MASK]")
[{'sequence': '[CLS] confirm plus chop [SEP]',
'score': 0.992355227470398,
'token': 24494,
'token_str': 'chop'},
{'sequence': '[CLS] confirm plus one [SEP]',
'score': 0.0037301010452210903,
'token': 2028,
'token_str': 'one'},
{'sequence': '[CLS] confirm plus minus [SEP]',
'score': 0.0014284878270700574,
'token': 15718,
'token_str': 'minus'},
{'sequence': '[CLS] confirm plus 1 [SEP]',
'score': 0.0011354683665558696,
'token': 1015,
'token_str': '1'},
{'sequence': '[CLS] confirm plus chopped [SEP]',
'score': 0.0003804611915256828,
'token': 24881,
'token_str': 'chopped'}]
>>> nlp("die [MASK] must try")
[{'sequence': '[CLS] die die must try [SEP]',
'score': 0.9552758932113647,
'token': 3280,
'token_str': 'die'},
{'sequence': '[CLS] die also must try [SEP]',
'score': 0.03644804656505585,
'token': 2036,
'token_str': 'also'},
{'sequence': '[CLS] die liao must try [SEP]',
'score': 0.003282855963334441,
'token': 727,
'token_str': 'liao'},
{'sequence': '[CLS] die already must try [SEP]',
'score': 0.0004937972989864647,
'token': 2525,
'token_str': 'already'},
{'sequence': '[CLS] die hard must try [SEP]',
'score': 0.0003659659414552152,
'token': 2524,
'token_str': 'hard'}]
```
Here is how to use this model to get the features of a given text in PyTorch:
```python
from transformers import BertTokenizer, BertModel
tokenizer = BertTokenizer.from_pretrained('zanelim/singbert')
model = BertModel.from_pretrained("zanelim/singbert")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
```
and in TensorFlow:
```python
from transformers import BertTokenizer, TFBertModel
tokenizer = BertTokenizer.from_pretrained("zanelim/singbert")
model = TFBertModel.from_pretrained("zanelim/singbert")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='tf')
output = model(encoded_input)
```
#### Limitations and bias
This model was finetuned on a colloquial Singlish and Manglish corpus, so it is best applied to downstream tasks involving its main
constituent languages: English, Mandarin, and Malay. Also, because the training data comes mainly from forums, be aware of the inherent biases it may carry.
## Training data
A corpus of colloquial Singlish and Manglish (both are mixtures of English, Mandarin, Tamil, Malay, and other local dialects such as Hokkien, Cantonese, or Teochew).
The corpus was collected from the subreddits `r/singapore` and `r/malaysia`, and from forums such as `hardwarezone`.
## Training procedure
Initialized with the [bert base uncased](https://github.com/google-research/bert#pre-trained-models) vocab and checkpoints (pre-trained weights).
The top 1,000 custom vocab tokens (not overlapping with the original BERT vocab) were then extracted from the training data and filled into the unused token slots of the original BERT vocab.
Pre-training was then continued on the training data with the following hyperparameters (a rough `transformers` equivalent is sketched after the list):
* train_batch_size: 512
* max_seq_length: 128
* num_train_steps: 300000
* num_warmup_steps: 5000
* learning_rate: 2e-5
* hardware: TPU v3-8
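As a rough illustration only (the original runs used the TensorFlow BERT codebase on a TPU v3-8, not this API), the hyperparameters above would map onto `transformers` `TrainingArguments` roughly as follows:
```python
# Illustrative mapping of the listed hyperparameters onto transformers' TrainingArguments.
# This is an assumption for readability, not the authors' actual training setup.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="singbert-continued-pretraining",  # hypothetical output directory
    per_device_train_batch_size=512,              # train_batch_size (global batch on TPU v3-8)
    max_steps=300_000,                            # num_train_steps
    warmup_steps=5_000,                           # num_warmup_steps
    learning_rate=2e-5,
)
# max_seq_length (128) would be applied when tokenizing the corpus, not here.
```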
|
|
zbmain/bert_cn_finetuning | 2021-05-20T09:39:42.000Z | [
"pytorch",
"jax",
"bert",
"masked-lm",
"transformers",
"fill-mask"
] | fill-mask | [
".gitattributes",
"README.md",
"README2.md",
"added_tokens.json",
"config.json",
"eval_results.txt",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tensorflow-gpu-macosx-1.8.1.tar.gz",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
] | zbmain | 12 | transformers | bert_cn_finetuning model
|
zbmain/bert_finetuning_test | 2020-11-23T19:59:14.000Z | [] | [
".gitattributes"
] | zbmain | 0 | |||
zbmain/test | 2020-11-24T12:12:29.000Z | [
"pytorch"
] | [
".gitattributes",
"README.md",
"pytorch_model.bin"
] | zbmain | 0 | 123
|
||
zein/ArXivBert | 2021-04-19T18:59:13.000Z | [] | [
".gitattributes"
] | zein | 0 | |||
zemin/trans_model | 2021-02-10T18:35:40.000Z | [] | [
".gitattributes"
] | zemin | 0 | |||
zeonai/deepdelve-model1-qa | 2021-05-20T09:40:39.000Z | [
"pytorch",
"jax",
"bert",
"masked-lm",
"transformers",
"fill-mask"
] | fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"vocab.txt"
] | zeonai | 7 | transformers | |
zeonai/deepdelve-model2-qa | 2021-05-20T09:41:32.000Z | [
"pytorch",
"jax",
"bert",
"masked-lm",
"transformers",
"fill-mask"
] | fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"vocab.txt"
] | zeonai | 11 | transformers | |
zeropointbreakthrough/test | 2021-06-02T00:57:13.000Z | [] | [
".gitattributes"
] | zeropointbreakthrough | 0 | |||
zhangchulong/bert-large-cased-1 | 2020-12-23T08:06:05.000Z | [] | [
".gitattributes"
] | zhangchulong | 0 | |||
zhangchulong/bert-large-cased-wwm | 2020-12-23T03:40:24.000Z | [] | [
".gitattributes"
] | zhangchulong | 0 | |||
zhangqi/bert-base-chinese | 2021-02-04T12:55:53.000Z | [] | [
".gitattributes"
] | zhangqi | 0 | |||
zhangqi/bert-base-uncased | 2021-02-05T04:06:31.000Z | [] | [
".gitattributes"
] | zhangqi | 0 | |||
zhangqi/bert_base_chinese | 2021-02-04T14:14:33.000Z | [] | [
".gitattributes"
] | zhangqi | 0 | |||
zhangqi/model_name | 2021-02-04T14:22:03.000Z | [] | [
".gitattributes"
] | zhangqi | 0 | |||
zhangxy-2019/cu_dstc9_dialoGPT | 2021-05-23T14:05:15.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
] | text-generation | [
".gitattributes",
"config.json",
"eval_results.txt",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
] | zhangxy-2019 | 14 | transformers | |
zhangxy-2019/cunlp-gpt2-dialog | 2021-05-23T14:07:17.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
] | text-generation | [
".gitattributes",
"config.json",
"eval_results.txt",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
] | zhangxy-2019 | 22 | transformers | |
zhaochaocs/storygen | 2021-02-23T22:29:45.000Z | [] | [
".gitattributes"
] | zhaochaocs | 0 | |||
zhaochongshan/bert_cn_finetunning | 2021-05-08T03:45:51.000Z | [] | [
".gitattributes"
] | zhaochongshan | 0 | |||
zharry29/goal_benchmark_bert | 2021-05-20T09:42:25.000Z | [
"pytorch",
"jax",
"bert",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"is_test_false_eval_results.txt",
"model_pred_false.csv",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
] | zharry29 | 19 | transformers | ||
zharry29/goal_benchmark_gpt | 2021-05-23T14:08:46.000Z | [
"pytorch",
"gpt2",
"transformers"
] | [
".gitattributes",
"config.json",
"is_test_false_eval_results.txt",
"merges.txt",
"model_pred_false.csv",
"pred_probs.csv",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
] | zharry29 | 17 | transformers | ||
zharry29/goal_benchmark_roberta | 2021-05-20T23:25:11.000Z | [
"pytorch",
"jax",
"roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"is_test_false_eval_results.txt",
"merges.txt",
"model_pred_false.csv",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
] | zharry29 | 14 | transformers | ||
zharry29/goal_benchmark_xlnet | 2020-09-16T20:02:36.000Z | [
"pytorch",
"xlnet",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"is_test_false_eval_results.txt",
"model_pred_false.csv",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json",
"training_args.bin"
] | zharry29 | 26 | transformers | ||
zharry29/intent_enwh_rl | 2020-09-16T20:10:41.000Z | [
"pytorch",
"xlm-roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin"
] | zharry29 | 15 | transformers | ||
zharry29/intent_enwh_xlmr | 2020-09-16T20:11:13.000Z | [
"pytorch",
"xlm-roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"training_args.bin"
] | zharry29 | 11 | transformers | ||
zharry29/intent_fb-en_id_rl | 2021-05-20T23:27:13.000Z | [
"pytorch",
"jax",
"roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"training_args.bin",
"vocab.json"
] | zharry29 | 12 | transformers | ||
zharry29/intent_fb-en_id_xlmr | 2021-05-20T23:30:29.000Z | [
"pytorch",
"jax",
"roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"training_args.bin",
"vocab.json"
] | zharry29 | 14 | transformers | ||
zharry29/intent_fb-en_wh_id_rl | 2021-05-20T23:33:07.000Z | [
"pytorch",
"jax",
"roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"training_args.bin",
"vocab.json"
] | zharry29 | 16 | transformers | ||
zharry29/intent_fb-es_enwh_id | 2020-09-16T20:13:57.000Z | [
"pytorch",
"xlm-roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"training_args.bin"
] | zharry29 | 11 | transformers | ||
zharry29/intent_fb-es_id | 2020-09-16T20:14:32.000Z | [
"pytorch",
"xlm-roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"training_args.bin"
] | zharry29 | 12 | transformers | ||
zharry29/intent_fb-es_wh_id | 2020-09-16T20:15:03.000Z | [
"pytorch",
"xlm-roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"training_args.bin"
] | zharry29 | 11 | transformers | ||
zharry29/intent_fb-th_enwh_id | 2020-09-16T20:15:38.000Z | [
"pytorch",
"xlm-roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"training_args.bin"
] | zharry29 | 12 | transformers | ||
zharry29/intent_fb-th_id | 2020-09-16T20:16:29.000Z | [
"pytorch",
"xlm-roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"training_args.bin"
] | zharry29 | 13 | transformers | ||
zharry29/intent_fb-th_wh_id | 2020-09-16T20:17:00.000Z | [
"pytorch",
"xlm-roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"training_args.bin"
] | zharry29 | 11 | transformers | ||
zharry29/intent_sgd_id | 2021-05-20T23:36:23.000Z | [
"pytorch",
"jax",
"roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"training_args.bin",
"vocab.json"
] | zharry29 | 10 | transformers | ||
zharry29/intent_sgd_wh_id | 2021-05-20T23:38:40.000Z | [
"pytorch",
"jax",
"roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"training_args.bin",
"vocab.json"
] | zharry29 | 13 | transformers | ||
zharry29/intent_snips_id | 2021-05-20T23:47:11.000Z | [
"pytorch",
"jax",
"roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
] | zharry29 | 27 | transformers | ||
zharry29/intent_snips_wh_id | 2021-05-20T23:49:50.000Z | [
"pytorch",
"jax",
"roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"finetune5.log",
"flax_model.msgpack",
"pytorch_model.bin",
"training_args.bin",
"vocab.json"
] | zharry29 | 14 | transformers | ||
zharry29/intent_thwh | 2020-09-16T20:44:55.000Z | [
"pytorch",
"xlm-roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"training_args.bin"
] | zharry29 | 13 | transformers | ||
zharry29/order_benchmark_bert | 2021-05-20T09:43:21.000Z | [
"pytorch",
"jax",
"bert",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"is_test_false_eval_results.txt",
"model_pred_false.csv",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
] | zharry29 | 17 | transformers | ||
zharry29/order_benchmark_gpt | 2021-05-23T14:09:14.000Z | [
"pytorch",
"gpt2",
"transformers"
] | [
".gitattributes",
"config.json",
"is_test_false_eval_results.txt",
"merges.txt",
"model_pred_false.csv",
"pred_probs.csv",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
] | zharry29 | 19 | transformers | ||
zharry29/order_benchmark_roberta | 2021-05-20T23:51:12.000Z | [
"pytorch",
"jax",
"roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"is_test_false_eval_results.txt",
"merges.txt",
"model_pred_false.csv",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
] | zharry29 | 18 | transformers | ||
zharry29/order_benchmark_xlnet | 2020-09-16T20:03:11.000Z | [
"pytorch",
"xlnet",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"is_test_false_eval_results.txt",
"model_pred_false.csv",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json",
"training_args.bin"
] | zharry29 | 19 | transformers | ||
zharry29/step_benchmark_bert | 2021-05-20T09:44:40.000Z | [
"pytorch",
"jax",
"bert",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"is_test_false_eval_results.txt",
"model_pred_false.csv",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
] | zharry29 | 18 | transformers | ||
zharry29/step_benchmark_gpt | 2021-05-23T14:09:43.000Z | [
"pytorch",
"gpt2",
"transformers"
] | [
".gitattributes",
"config.json",
"is_test_false_eval_results.txt",
"merges.txt",
"model_pred_false.csv",
"pred_probs.csv",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
] | zharry29 | 18 | transformers | ||
zharry29/step_benchmark_roberta | 2021-05-20T23:52:30.000Z | [
"pytorch",
"jax",
"roberta",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"is_test_false_eval_results.txt",
"merges.txt",
"model_pred_false.csv",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
] | zharry29 | 16 | transformers | ||
zharry29/step_benchmark_xlnet | 2020-09-16T19:57:55.000Z | [
"pytorch",
"xlnet",
"multiple-choice",
"transformers"
] | [
".gitattributes",
"config.json",
"is_test_false_eval_results.txt",
"model_pred_false.csv",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json",
"training_args.bin"
] | zharry29 | 22 | transformers | ||
zhe/sst2 | 2021-04-24T10:13:56.000Z | [] | [
".gitattributes"
] | zhe | 0 | |||
zhewhen/pairwise_similarity | 2021-05-28T16:10:42.000Z | [] | [
".gitattributes"
] | zhewhen | 0 | |||
zhiheng-huang/bert-base-uncased-embedding-relative-key-query | 2021-05-20T09:45:59.000Z | [
"pytorch",
"jax",
"bert",
"masked-lm",
"transformers",
"fill-mask"
] | fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
] | zhiheng-huang | 116 | transformers | |
zhiheng-huang/bert-base-uncased-embedding-relative-key | 2021-05-20T09:46:58.000Z | [
"pytorch",
"jax",
"bert",
"masked-lm",
"transformers",
"fill-mask"
] | fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
] | zhiheng-huang | 79 | transformers | |
zhiheng-huang/bert-large-uncased-whole-word-masking-embedding-relative-key-query | 2021-05-20T09:48:50.000Z | [
"pytorch",
"jax",
"bert",
"masked-lm",
"transformers",
"fill-mask"
] | fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
] | zhiheng-huang | 66 | transformers | |
zichju/Eelai | 2021-01-12T00:53:34.000Z | [] | [
".gitattributes"
] | zichju | 0 | |||
zitterbewegung/DialoGPT-medium-ja | 2021-05-23T14:11:28.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
] | text-generation | [
".gitattributes",
"config.json",
"eval_results.txt",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
] | zitterbewegung | 17 | transformers | |
zjt/123 | 2021-05-11T00:58:44.000Z | [] | [
".gitattributes",
"README.md"
] | zjt | 0 | |||
zlucia/bert-double | 2021-06-05T22:34:49.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"pretraining",
"en",
"arxiv:2104.08671",
"arxiv:1810.04805",
"arxiv:1903.10676",
"transformers",
"fill-mask",
"pipeline_tag:fill-mask"
] | fill-mask | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
] | zlucia | 55 | transformers | ---
language: en
pipeline_tag: fill-mask
---
### BERT (double)
Model and tokenizer files for BERT (double) model from [When Does Pretraining Help? Assessing Self-Supervised Learning for Law and the CaseHOLD Dataset](https://arxiv.org/abs/2104.08671).
### Training Data
BERT (double) is pretrained using the same English Wikipedia corpus that the base BERT model (uncased, 110M parameters), [bert-base-uncased](https://huggingface.co/bert-base-uncased), was pretrained on. For more information on the pretraining corpus, refer to the [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) paper.
### Training Objective
This model is initialized with the base BERT model (uncased, 110M parameters), [bert-base-uncased](https://huggingface.co/bert-base-uncased), and trained for an additional 1M steps on the MLM and NSP objective.
This facilitates a direct comparison to our BERT-based models for the legal domain, which are also pretrained for 2M total steps.
- Legal-BERT: zlucia/legalbert (https://huggingface.co/zlucia/legalbert)
- Custom Legal-BERT: zlucia/custom-legalbert (https://huggingface.co/zlucia/custom-legalbert)
### Usage
Please see the [casehold repository](https://github.com/reglab/casehold) for scripts that support computing pretrain loss and finetuning on BERT (double) for classification and multiple choice tasks described in the paper: Overruling, Terms of Service, CaseHOLD.
See `demo.ipynb` in the casehold repository for details on calculating domain specificity (DS) scores for tasks or task examples by taking the difference in pretrain loss on BERT (double) and Legal-BERT. DS score may be readily extended to estimate domain specificity of tasks in other domains using BERT (double) and existing pretrained models (e.g., [SciBERT](https://arxiv.org/abs/1903.10676)).
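As a simplified, hedged illustration of that idea (not the actual `demo.ipynb` procedure, and using an invented example sentence), one could compare the two checkpoints by masking the same token position and taking the difference in masked-LM loss:
```python
# Rough sketch: compare the masked-LM loss of BERT (double) and Legal-BERT
# when the same token position is masked in the same sentence.
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

def masked_loss(model_name: str, text: str, position: int) -> float:
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForMaskedLM.from_pretrained(model_name)
    inputs = tokenizer(text, return_tensors="pt")
    labels = torch.full_like(inputs["input_ids"], -100)      # -100 = ignored by the loss
    labels[0, position] = inputs["input_ids"][0, position]   # score only this position
    inputs["input_ids"][0, position] = tokenizer.mask_token_id
    with torch.no_grad():
        return model(**inputs, labels=labels).loss.item()

text = "The court overruled the objection."  # illustrative sentence, not from the paper
ds_signal = (masked_loss("zlucia/bert-double", text, position=2)
             - masked_loss("zlucia/legalbert", text, position=2))
print(ds_signal)  # larger values suggest the example is more legal-domain specific
```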
### Citation
@inproceedings{zhengguha2021,
title={When Does Pretraining Help? Assessing Self-Supervised Learning for Law and the CaseHOLD Dataset},
author={Lucia Zheng and Neel Guha and Brandon R. Anderson and Peter Henderson and Daniel E. Ho},
year={2021},
eprint={2104.08671},
archivePrefix={arXiv},
primaryClass={cs.CL},
booktitle={Proceedings of the 18th International Conference on Artificial Intelligence and Law},
publisher={Association for Computing Machinery},
note={(in press)}
}
Lucia Zheng, Neel Guha, Brandon R. Anderson, Peter Henderson, and Daniel E. Ho. 2021. When Does Pretraining Help? Assessing Self-Supervised Learning for Law and the CaseHOLD Dataset. In *Proceedings of the 18th International Conference on Artificial Intelligence and Law (ICAIL '21)*, June 21-25, 2021, São Paulo, Brazil. ACM Inc., New York, NY, (in press). arXiv: [2104.08671 [cs.CL]](https://arxiv.org/abs/2104.08671). |
zlucia/custom-legalbert | 2021-06-05T22:30:43.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"en",
"arxiv:2104.08671",
"arxiv:1808.06226",
"transformers",
"legal",
"fill-mask",
"pipeline_tag:fill-mask"
] | fill-mask | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
] | zlucia | 150 | transformers | ---
language: en
pipeline_tag: fill-mask
tags:
- legal
---
### Custom Legal-BERT
Model and tokenizer files for Custom Legal-BERT model from [When Does Pretraining Help? Assessing Self-Supervised Learning for Law and the CaseHOLD Dataset](https://arxiv.org/abs/2104.08671).
### Training Data
The pretraining corpus was constructed by ingesting the entire Harvard Law case corpus from 1965 to the present (https://case.law/). The size of this corpus (37GB) is substantial, representing 3,446,187 legal decisions across all federal and state courts, and is larger than the size of the BookCorpus/Wikipedia corpus originally used to train BERT (15GB).
### Training Objective
This model is pretrained from scratch for 2M steps on the MLM and NSP objective, with tokenization and sentence segmentation adapted for legal text (cf. the paper).
The model also uses a custom domain-specific legal vocabulary. The vocabulary set is constructed using [SentencePiece](https://arxiv.org/abs/1808.06226) on a subsample (approx. 13M) of sentences from our pretraining corpus, with the number of tokens fixed to 32,000.
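A minimal sketch of how a 32,000-token SentencePiece vocabulary could be trained on such a sentence file (the file name and default options here are assumptions; the repository itself ships a `vocab.txt`):
```python
# Hypothetical sketch: train a 32k SentencePiece vocabulary on a file of legal sentences.
import sentencepiece as spm

spm.SentencePieceTrainer.train(
    input="legal_sentences.txt",  # assumed: one sentence per line, sampled from the corpus
    model_prefix="legal_vocab",   # writes legal_vocab.model and legal_vocab.vocab
    vocab_size=32000,
)
```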
### Usage
Please see the [casehold repository](https://github.com/reglab/casehold) for scripts that support computing pretrain loss and finetuning on Custom Legal-BERT for classification and multiple choice tasks described in the paper: Overruling, Terms of Service, CaseHOLD.
### Citation
```
@inproceedings{zhengguha2021,
title={When Does Pretraining Help? Assessing Self-Supervised Learning for Law and the CaseHOLD Dataset},
author={Lucia Zheng and Neel Guha and Brandon R. Anderson and Peter Henderson and Daniel E. Ho},
year={2021},
eprint={2104.08671},
archivePrefix={arXiv},
primaryClass={cs.CL},
booktitle={Proceedings of the 18th International Conference on Artificial Intelligence and Law},
publisher={Association for Computing Machinery},
note={(in press)}
}
```
Lucia Zheng, Neel Guha, Brandon R. Anderson, Peter Henderson, and Daniel E. Ho. 2021. When Does Pretraining Help? Assessing Self-Supervised Learning for Law and the CaseHOLD Dataset. In *Proceedings of the 18th International Conference on Artificial Intelligence and Law (ICAIL '21)*, June 21-25, 2021, São Paulo, Brazil. ACM Inc., New York, NY, (in press). arXiv: [2104.08671 [cs.CL]](https://arxiv.org/abs/2104.08671). |
zlucia/legalbert | 2021-06-05T22:15:13.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"en",
"arxiv:2104.08671",
"transformers",
"legal",
"fill-mask",
"pipeline_tag:fill-mask"
] | fill-mask | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
] | zlucia | 162 | transformers | ---
language: en
pipeline_tag: fill-mask
tags:
- legal
---
### Legal-BERT
Model and tokenizer files for Legal-BERT model from [When Does Pretraining Help? Assessing Self-Supervised Learning for Law and the CaseHOLD Dataset of 53,000+ Legal Holdings](https://arxiv.org/abs/2104.08671).
### Training Data
The pretraining corpus was constructed by ingesting the entire Harvard Law case corpus from 1965 to the present (https://case.law/). The size of this corpus (37GB) is substantial, representing 3,446,187 legal decisions across all federal and state courts, and is larger than the size of the BookCorpus/Wikipedia corpus originally used to train BERT (15GB).
### Training Objective
This model is initialized with the base BERT model (uncased, 110M parameters), [bert-base-uncased](https://huggingface.co/bert-base-uncased), and trained for an additional 1M steps on the MLM and NSP objective, with tokenization and sentence segmentation adapted for legal text (cf. the paper).
### Usage
Please see the [casehold repository](https://github.com/reglab/casehold) for scripts that support computing pretrain loss and finetuning on Legal-BERT for classification and multiple choice tasks described in the paper: Overruling, Terms of Service, CaseHOLD.
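For quick experimentation outside those scripts, the checkpoint can also be loaded directly with the `fill-mask` pipeline (a minimal sketch; the example sentence is illustrative, not from the paper):
```python
from transformers import pipeline

nlp = pipeline("fill-mask", model="zlucia/legalbert")
nlp("The court granted the motion for summary [MASK].")
```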
### Citation
```
@inproceedings{zhengguha2021,
title={When Does Pretraining Help? Assessing Self-Supervised Learning for Law and the CaseHOLD Dataset},
author={Lucia Zheng and Neel Guha and Brandon R. Anderson and Peter Henderson and Daniel E. Ho},
year={2021},
eprint={2104.08671},
archivePrefix={arXiv},
primaryClass={cs.CL},
booktitle={Proceedings of the 18th International Conference on Artificial Intelligence and Law},
publisher={Association for Computing Machinery},
note={(in press)}
}
```
Lucia Zheng, Neel Guha, Brandon R. Anderson, Peter Henderson, and Daniel E. Ho. 2021. When Does Pretraining Help? Assessing Self-Supervised Learning for Law and the CaseHOLD Dataset. In *Proceedings of the 18th International Conference on Artificial Intelligence and Law (ICAIL '21)*, June 21-25, 2021, São Paulo, Brazil. ACM Inc., New York, NY, (in press). arXiv: [2104.08671 [cs.CL]](https://arxiv.org/abs/2104.08671).
|
znigeln/test | 2021-05-10T14:59:11.000Z | [] | [
".gitattributes"
] | znigeln | 0 | |||
zoeozone/gadgetgreen | 2021-03-22T22:31:02.000Z | [] | [
".gitattributes",
"README.md"
] | zoeozone | 0 | |||
zoeozone/zoeclone | 2021-03-30T00:20:32.000Z | [] | [
".gitattributes"
] | zoeozone | 0 | |||
zoeozone/zoeozone | 2021-03-03T02:00:36.000Z | [] | [
".gitattributes"
] | zoeozone | 0 | |||
zoeymeng913/bert_cn_finetuning | 2021-05-20T09:54:41.000Z | [
"pytorch",
"jax",
"bert",
"masked-lm",
"transformers",
"fill-mask"
] | fill-mask | [
".gitattributes",
"added_tokens.json",
"config.json",
"eval_results.txt",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt",
"checkpoint-250/training_args.bin"
] | zoeymeng913 | 40 | transformers | |
zoeymeng913/bert_finetuning_test | 2021-05-20T09:55:05.000Z | [
"bert",
"masked-lm",
"transformers",
"fill-mask"
] | fill-mask | [
".gitattributes",
"config.json",
"eval_results.txt",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin"
] | zoeymeng913 | 18 | transformers | |
zohaib-khan/bert-medium-legal | 2020-12-09T12:52:23.000Z | [] | [
".gitattributes"
] | zohaib-khan | 0 | |||
zqf03118/ItcastAI | 2021-02-02T14:39:19.000Z | [] | [
".gitattributes"
] | zqf03118 | 0 | |||
zqf03118/bert_cn_finetuning | 2021-05-20T09:55:48.000Z | [
"pytorch",
"jax",
"bert",
"masked-lm",
"transformers",
"fill-mask"
] | fill-mask | [
".gitattributes",
"added_tokens.json",
"config.json",
"eval_results.txt",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
] | zqf03118 | 6 | transformers | |
zqf03118/bert_finetuning_test | 2021-05-20T09:56:44.000Z | [
"pytorch",
"jax",
"bert",
"masked-lm",
"transformers",
"fill-mask"
] | fill-mask | [
".gitattributes",
"added_tokens.json",
"config.json",
"eval_results.txt",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
] | zqf03118 | 9 | transformers | |
zr19z1211/mask-fill | 2021-04-30T07:46:51.000Z | [] | [
".gitattributes"
] | zr19z1211 | 0 | |||
zundel/model_name | 2021-04-06T14:31:51.000Z | [] | [
".gitattributes"
] | zundel | 0 | |||
zxsu/test_model | 2021-05-10T12:40:19.000Z | [] | [
".gitattributes"
] | zxsu | 0 | |||
zyberg2091/distilbert-base-multilingual-toxicity-classifier | 2021-01-06T20:43:33.000Z | [
"tf",
"distilbert",
"text-classification",
"transformers"
] | text-classification | [
".gitattributes",
"config.json",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
] | zyberg2091 | 27 | transformers | |
versae/mBERT-5lang-adobo2021 | 2021-06-19T00:23:38.000Z | [] | [
".gitattributes"
] | versae | 0 | |||
worsterman/DialoGPT-medium-mulder | 2021-06-19T00:46:42.000Z | [] | [
".gitattributes"
] | worsterman | 0 | |||
wassemgtk/snippetv2 | 2021-06-19T01:31:42.000Z | [] | [
".gitattributes"
] | wassemgtk | 0 | |||
danurahul/yoav_gpt_neo1.3B_delimiter | 2021-06-19T02:27:20.000Z | [
"pytorch",
"gpt_neo",
"causal-lm",
"transformers",
"text-generation"
] | text-generation | [
".gitattributes",
"config.json",
"merges.txt",
"optimizer.pt",
"pytorch_model.bin",
"rng_state.pth",
"scheduler.pt",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"trainer_state.json",
"training_args.bin",
"vocab.json"
] | danurahul | 0 | transformers | |
saichandrapandraju/t5_small_tabqgen | 2021-06-19T02:33:50.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
] | text2text-generation | [
".gitattributes",
"added_tokens.json",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer.json",
"tokenizer_config.json"
] | saichandrapandraju | 0 | transformers | |
saichandrapandraju/t5_large_tabqgen | 2021-06-19T02:50:02.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
] | text2text-generation | [
".gitattributes",
"added_tokens.json",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer.json",
"tokenizer_config.json"
] | saichandrapandraju | 0 | transformers | |
sambelanz/copaamerica2021 | 2021-06-19T03:24:15.000Z | [] | [
".gitattributes",
"Messi.md"
] | sambelanz | 0 | |||
p208p2002/qmst-qgg-qa | 2021-06-19T05:04:13.000Z | [
"pytorch",
"bart",
"seq2seq",
"transformers",
"text2text-generation"
] | text2text-generation | [
".gitattributes",
"added_tokens.json",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
] | p208p2002 | 0 | transformers | |
p208p2002/qmst-qgg | 2021-06-19T05:17:43.000Z | [
"pytorch",
"bart",
"seq2seq",
"transformers",
"text2text-generation"
] | text2text-generation | [
".gitattributes",
"added_tokens.json",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
] | p208p2002 | 0 | transformers | |
royam0820/dummy-model | 2021-06-19T09:05:32.000Z | [] | [
".gitattributes"
] | royam0820 | 0 | |||
motiondew/set_date_1-bert | 2021-06-19T10:57:12.000Z | [
"pytorch",
"bert",
"masked-lm",
"transformers",
"fill-mask"
] | fill-mask | [
".gitattributes",
"config.json",
"prediction_head_0.bin",
"prediction_head_0_config.json",
"processor_config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
] | motiondew | 0 | transformers | |
noelmathewisaac/inspirational-quotes-distilgpt2 | 2021-06-19T11:01:28.000Z | [
"pytorch",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
] | text-generation | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
] | noelmathewisaac | 0 | transformers | |
harish/AllTokenFineTunedNLI-E1 | 2021-06-19T11:24:56.000Z | [
"transformers"
] | [
".gitattributes",
"config.json",
"modules.json",
"similarity_evaluation_sts-dev_results.csv",
"similarity_evaluation_sts-test_results.csv",
"0_Transformer/config.json",
"0_Transformer/pytorch_model.bin",
"0_Transformer/sentence_bert_config.json",
"0_Transformer/special_tokens_map.json",
"0_Transformer/tokenizer_config.json",
"0_Transformer/vocab.txt",
"1_Pooling/config.json"
] | harish | 0 | transformers | ||
remotejob/tweetsTINYGPT2fi_v1 | 2021-06-19T15:35:18.000Z | [
"pytorch",
"rust",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
] | text-generation | [
".gitattributes",
"config.json",
"merges.txt",
"pytorch_model.bin",
"rust_model.ot",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.json"
] | remotejob | 0 | transformers |