solemn-leader committed on
Commit
0b547e7
1 Parent(s): d9f55c5

Update README.md

Files changed (1)
  1. README.md +15 -15
README.md CHANGED
@@ -36,20 +36,20 @@ from transformers import AutoTokenizer, AutoModelWithLMHead
  tokenizer = AutoTokenizer.from_pretrained('tinkoff-ai/ruDialoGPT-medium')
  model = AutoModelWithLMHead.from_pretrained('tinkoff-ai/ruDialoGPT-medium')
  inputs = tokenizer('@@ПЕРВЫЙ@@ привет @@ВТОРОЙ@@ привет @@ПЕРВЫЙ@@ как дела? @@ВТОРОЙ@@', return_tensors='pt')
- with torch.inference_mode():
-     generated_token_ids = model.generate(
-         **inputs,
-         top_k=10,
-         top_p=0.95,
-         num_beams=3,
-         num_return_sequences=3,
-         do_sample=True,
-         no_repeat_ngram_size=2,
-         temperature=1.2,
-         repetition_penalty=1.2,
-         length_penalty=1.0,
-         eos_token_id=50257
-     )
-     context_with_response = [tokenizer.decode(sample_token_ids) for sample_token_ids in generated_token_ids]
+ generated_token_ids = model.generate(
+     **inputs,
+     top_k=10,
+     top_p=0.95,
+     num_beams=3,
+     num_return_sequences=3,
+     do_sample=True,
+     no_repeat_ngram_size=2,
+     temperature=1.2,
+     repetition_penalty=1.2,
+     length_penalty=1.0,
+     eos_token_id=50257,
+     max_new_tokens=40
+ )
+ context_with_response = [tokenizer.decode(sample_token_ids) for sample_token_ids in generated_token_ids]
  context_with_response
  ```
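
For reference, a minimal sketch of how the README snippet reads after this change, assembled from the hunk above and the import shown in its context line; the `print` call at the end is only added here for illustration and is not part of the diff.

```python
# Sketch of the updated snippet: the torch.inference_mode() wrapper is dropped
# and max_new_tokens=40 bounds the length of the generated reply.
from transformers import AutoTokenizer, AutoModelWithLMHead

tokenizer = AutoTokenizer.from_pretrained('tinkoff-ai/ruDialoGPT-medium')
model = AutoModelWithLMHead.from_pretrained('tinkoff-ai/ruDialoGPT-medium')

# The dialogue context uses the model's speaker tokens (@@ПЕРВЫЙ@@ / @@ВТОРОЙ@@).
inputs = tokenizer('@@ПЕРВЫЙ@@ привет @@ВТОРОЙ@@ привет @@ПЕРВЫЙ@@ как дела? @@ВТОРОЙ@@', return_tensors='pt')

generated_token_ids = model.generate(
    **inputs,
    top_k=10,
    top_p=0.95,
    num_beams=3,
    num_return_sequences=3,
    do_sample=True,
    no_repeat_ngram_size=2,
    temperature=1.2,
    repetition_penalty=1.2,
    length_penalty=1.0,
    eos_token_id=50257,
    max_new_tokens=40
)

# Each returned sequence is the original context followed by a generated reply;
# with num_return_sequences=3 the list holds three candidate continuations.
context_with_response = [tokenizer.decode(sample_token_ids) for sample_token_ids in generated_token_ids]
print(context_with_response)
```

Note that `AutoModelWithLMHead` is deprecated in recent `transformers` releases in favor of `AutoModelForCausalLM`; the snippet keeps the class name used in the README.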