Update README.md

README.md (changed) — diff context from the model card front matter:

pipeline_tag: conversational
tags:
- gpt-2
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
tags:
|
7 |
- gpt-2
|
8 |
---
|
9 |
+
```python
|
10 |
+
from transformers import GPT2Tokenizer, GPT2LMHeadModel
|
11 |
+
|
12 |
+
def generate_response(input_text):
    """Generate a chatbot reply for *input_text*.

    Uses the module-level GPT-2 ``tokenizer`` and ``model`` (loaded at the
    bottom of this script). The raw generation is expected to contain a
    ``[Bot]`` marker; everything after that marker, truncated at the last
    complete sentence, is returned.

    Args:
        input_text: The user's message (plain string).

    Returns:
        The bot's reply with surrounding whitespace stripped.
    """
    inputs = tokenizer(input_text, return_tensors="pt")
    output_sequences = model.generate(
        input_ids=inputs['input_ids'],
        attention_mask=inputs['attention_mask'],
        max_length=100,           # hard cap on total (prompt + generated) tokens
        temperature=0.3,          # low temperature -> mostly-deterministic sampling
        top_k=40,
        top_p=0.85,
        num_return_sequences=1,
        no_repeat_ngram_size=2,   # forbid repeating any 2-gram
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; reuse EOS
        early_stopping=True,
        do_sample=True,
        use_cache=True,
    )

    full_generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)

    # BUG FIX: str.find() returns -1 when '[Bot]' is absent, and the original
    # code then computed a bogus start index of 4 (-1 + len('[Bot]')), silently
    # dropping the first four characters. Fall back to the whole decoded text
    # when the marker is missing.
    marker = '[Bot]'
    marker_index = full_generated_text.find(marker)
    if marker_index != -1:
        bot_response = full_generated_text[marker_index + len(marker):]
    else:
        bot_response = full_generated_text

    # Truncate at the last sentence-ending period, if one exists, so the
    # reply does not end mid-sentence when generation hits max_length.
    last_period_index = bot_response.rfind('.')
    if last_period_index != -1:
        bot_response = bot_response[:last_period_index + 1]

    return bot_response.strip()
|
40 |
+
|
41 |
+
|
42 |
+
# Load the fine-tuned GPT-2 chat model and its tokenizer once at startup.
model_name = 'KhantKyaw/Chat_GPT-2'
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)

# BUG FIX: `user_input` was referenced but never defined in the original
# snippet, so it raised NameError before generating anything. Prompt the
# user for a message instead.
user_input = input("You: ")
response = generate_response(user_input)
print("Chatbot:", response)
|
47 |
+
|
48 |
+
```
|