prithivMLmods
committed on
Update README.md
Browse files
README.md
CHANGED
@@ -49,21 +49,11 @@ Implementing a pre-trained BART model for automatic text completion:
|
|
49 |
|
50 |
Based on facebook/bart
|
51 |
|
52 |
-
from transformers import BartForConditionalGeneration, BartTokenizer
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
sent = "GeekforGeeks has a <mask> article on BART."
|
60 |
-
|
61 |
-
# Tokenize the input sentence
|
62 |
-
tokenized_sent = tokenizer(sent, return_tensors='pt')
|
63 |
-
|
64 |
-
# Generate the output sequence
|
65 |
-
generated_encoded = bart_model.generate(tokenized_sent['input_ids'])
|
66 |
-
|
67 |
-
# Decode the generated sequence and print
|
68 |
-
print(tokenizer.batch_decode(generated_encoded, skip_special_tokens=True)[0])
|
69 |
|
|
|
49 |
|
50 |
Based on facebook/bart
|
51 |
|
52 |
+
from transformers import BartForConditionalGeneration, BartTokenizer
|
53 |
+
bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large", forced_bos_token_id=0) # takes a while to load
|
54 |
+
tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
|
55 |
+
sent = "-----------your text here----- <mask> -----your text here ---"
|
56 |
+
tokenized_sent = tokenizer(sent, return_tensors='pt')
|
57 |
+
generated_encoded = bart_model.generate(tokenized_sent['input_ids'])
|
58 |
+
print(tokenizer.batch_decode(generated_encoded, skip_special_tokens=True)[0])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
|