prithivMLmods committed on
Commit bacb2c1 · verified · 1 Parent(s): c84ccf0

Update README.md

Files changed (1)
  1. README.md +7 -17
README.md CHANGED
@@ -49,21 +49,11 @@ Implementing a pre-trained BART model for automatic text completion:
 
  Base from facebook bart /
 
- from transformers import BartForConditionalGeneration, BartTokenizer
-
- # Load pre-trained BART model
- bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large", forced_bos_token_id=0) # takes a while to load
- tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
-
- # Input sentence
- sent = "GeekforGeeks has a <mask> article on BART."
-
- # Tokenize the input sentence
- tokenized_sent = tokenizer(sent, return_tensors='pt')
-
- # Generate the output sequence
- generated_encoded = bart_model.generate(tokenized_sent['input_ids'])
-
- # Decode the generated sequence and print
- print(tokenizer.batch_decode(generated_encoded, skip_special_tokens=True)[0])
+ from transformers import BartForConditionalGeneration, BartTokenizer
+ bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large", forced_bos_token_id=0) # takes a while to load
+ tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
+ sent = "-----------your text here----- <mask> -----your text here ---"
+ tokenized_sent = tokenizer(sent, return_tensors='pt')
+ generated_encoded = bart_model.generate(tokenized_sent['input_ids'])
+ print(tokenizer.batch_decode(generated_encoded, skip_special_tokens=True)[0])
 
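
For anyone trying the snippet added in this commit, below is a minimal runnable sketch. The example sentence is an illustrative assumption (any text containing the tokenizer's `<mask>` token works); the rest mirrors the code from the diff above.

```python
# Minimal sketch, assuming transformers and torch are installed.
from transformers import BartForConditionalGeneration, BartTokenizer

# Load the pre-trained BART model and tokenizer (the first call downloads the weights).
bart_model = BartForConditionalGeneration.from_pretrained(
    "facebook/bart-large", forced_bos_token_id=0
)
tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")

# Illustrative input; replace with your own text containing <mask>.
sent = "The quick brown <mask> jumps over the lazy dog."

# Tokenize, generate a completion for the masked span, and decode it back to text.
tokenized_sent = tokenizer(sent, return_tensors="pt")
generated_encoded = bart_model.generate(tokenized_sent["input_ids"])
print(tokenizer.batch_decode(generated_encoded, skip_special_tokens=True)[0])
```

The printed text is the model's reconstruction of the full sentence with the `<mask>` token filled in; the exact completion depends on the model and generation settings.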