Spaces:
Running
Running
gorkemgoknar
committed on
Commit
•
c432d2f
1
Parent(s):
68c73ac
Update app.py
Browse files
app.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
import gradio as gr
|
|
|
2 |
|
3 |
from transformers import AutoConfig
|
4 |
from transformers import GPT2Tokenizer, GPT2LMHeadModel
|
@@ -9,6 +10,13 @@ model = GPT2LMHeadModel.from_pretrained('gorkemgoknar/gpt2chatbotenglish', confi
|
|
9 |
tokenizer = GPT2Tokenizer.from_pretrained('gorkemgoknar/gpt2chatbotenglish')
|
10 |
tokenizer.model_max_length = 1024
|
11 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
|
13 |
#See document for experiment https://www.linkedin.com/pulse/ai-goes-job-interview-g%C3%B6rkem-g%C3%B6knar/
|
14 |
|
@@ -24,13 +32,13 @@ def get_chat_response(name, input_txt = "Hello , what is your name?"):
|
|
24 |
#optimum response and speed
|
25 |
#50 token max length, temperature = 1.3 makes it creative
|
26 |
chat_history_ids = model.generate(
|
27 |
-
bot_input_ids, max_length=50,
|
28 |
pad_token_id=tokenizer.eos_token_id,
|
29 |
no_repeat_ngram_size=3,
|
30 |
do_sample=True,
|
31 |
-
top_k=
|
32 |
-
top_p=0.
|
33 |
-
temperature =
|
34 |
)
|
35 |
|
36 |
out_str = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
|
|
|
1 |
import gradio as gr
|
2 |
+
import random
|
3 |
|
4 |
from transformers import AutoConfig
|
5 |
from transformers import GPT2Tokenizer, GPT2LMHeadModel
|
|
|
10 |
tokenizer = GPT2Tokenizer.from_pretrained('gorkemgoknar/gpt2chatbotenglish')
|
11 |
tokenizer.model_max_length = 1024
|
12 |
|
13 |
+
#Dynamic Temperature
|
14 |
+
base_temperature = 1.4
|
15 |
+
dynamic_temperature_range = 0.15
|
16 |
+
|
17 |
+
rand_range = random.uniform(-1 * dynamic_temperature_range , dynamic_temperature_range )
|
18 |
+
temperature = base_temperature + rand_range
|
19 |
+
|
20 |
|
21 |
#See document for experiment https://www.linkedin.com/pulse/ai-goes-job-interview-g%C3%B6rkem-g%C3%B6knar/
|
22 |
|
|
|
32 |
#optimum response and speed
|
33 |
#50 token max length, temperature = 1.3 makes it creative
|
34 |
chat_history_ids = model.generate(
|
35 |
+
bot_input_ids,min_length =1, max_length=50,
|
36 |
pad_token_id=tokenizer.eos_token_id,
|
37 |
no_repeat_ngram_size=3,
|
38 |
do_sample=True,
|
39 |
+
top_k=50,
|
40 |
+
top_p=0.9,
|
41 |
+
temperature = temperature
|
42 |
)
|
43 |
|
44 |
out_str = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
|