Spaces:
Running
Running
gorkemgoknar
committed on
Commit
•
7b230b5
1
Parent(s):
74793b0
Update app.py
Browse files
app.py
CHANGED
@@ -17,6 +17,7 @@ dynamic_temperature_range = 0.15
|
|
17 |
rand_range = random.uniform(-1 * dynamic_temperature_range , dynamic_temperature_range )
|
18 |
temperature = base_temperature + rand_range
|
19 |
|
|
|
20 |
|
21 |
#See document for experiment https://www.linkedin.com/pulse/ai-goes-job-interview-g%C3%B6rkem-g%C3%B6knar/
|
22 |
|
@@ -27,7 +28,13 @@ def get_chat_response(name, input_txt = "Hello , what is your name?"):
|
|
27 |
|
28 |
##can respond well to history as well but for this quick demo not implemented
|
29 |
##see metayazar.com/chatbot for a min 2 history
|
30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
31 |
|
32 |
#optimum response and speed
|
33 |
#50 token max length, temperature = 1.3 makes it creative
|
|
|
17 |
rand_range = random.uniform(-1 * dynamic_temperature_range , dynamic_temperature_range )
|
18 |
temperature = base_temperature + rand_range
|
19 |
|
20 |
+
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
|
21 |
|
22 |
#See document for experiment https://www.linkedin.com/pulse/ai-goes-job-interview-g%C3%B6rkem-g%C3%B6knar/
|
23 |
|
|
|
28 |
|
29 |
##can respond well to history as well but for this quick demo not implemented
|
30 |
##see metayazar.com/chatbot for a min 2 history
|
31 |
+
|
32 |
+
##this is a multi-speaker model, currently no history, so ending with "<speaker2>" to get response. depends on who starts conversation it can be speaker1
|
33 |
+
##if there is a history depends on who started conversation it should end with <speaker1>
|
34 |
+
#historical implementation not implemented in this demo
|
35 |
+
|
36 |
+
|
37 |
+
bot_input_ids = tokenizer.encode(tokenizer.bos_token + personality + tokenizer.eos_token + input_txt + tokenizer.eos_token + "<speaker2>" , return_tensors='pt')
|
38 |
|
39 |
#optimum response and speed
|
40 |
#50 token max length, temperature = 1.3 makes it creative
|