import gradio as gr
import random

from transformers import AutoConfig
from transformers import GPT2Tokenizer, GPT2LMHeadModel

config = AutoConfig.from_pretrained('gorkemgoknar/gpt2chatbotenglish')
model = GPT2LMHeadModel.from_pretrained('gorkemgoknar/gpt2chatbotenglish', config=config)

tokenizer = GPT2Tokenizer.from_pretrained('gorkemgoknar/gpt2chatbotenglish')
tokenizer.model_max_length = 1024

# Dynamic temperature: add a small random offset to the base temperature.
# Note this is sampled once at import time, so one value is used for the whole session.
base_temperature = 1.3
dynamic_temperature_range = 0.15

rand_range = random.uniform(-dynamic_temperature_range, dynamic_temperature_range)
temperature = base_temperature + rand_range
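
# A minimal sketch (not part of the original demo) of resampling the temperature
# per request instead of once at import; it reuses the module-level settings above.
def sample_temperature():
  # Draw a fresh temperature around base_temperature for each call.
  return base_temperature + random.uniform(-dynamic_temperature_range,
                                           dynamic_temperature_range)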

SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]  # speaker/separator tokens used when building prompts

# See the write-up of this experiment: https://www.linkedin.com/pulse/ai-goes-job-interview-g%C3%B6rkem-g%C3%B6knar/

def get_chat_response(name, input_txt="Hello, what is your name?"):
  # I trained the dataset with "My name is " as a prefix; this lets me drop in
  # personalities and also gives the character a sense of his/her/its name.
  personality = "My name is " + name

  if not input_txt.endswith("."):
    # End the sentence with a period so the model parses it more clearly.
    input_txt = input_txt + "."

  ## The model can respond to history as well, but that is not implemented in
  ## this quick demo; see metayazar.com/chatbot for a version with history.
  ## This is a multi-speaker model: with no history the prompt is just the
  ## personality plus the user's <speaker1> turn and the eos token. With
  ## history, which speaker token closes the prompt depends on who started the
  ## conversation (a sketch of one possible history layout follows after this
  ## function).
  bot_input_ids = tokenizer.encode(
      tokenizer.bos_token + personality + "<speaker1>" + input_txt + tokenizer.eos_token,
      return_tensors='pt')

  # Settings chosen for a balance of response quality and speed:
  # at most 50 tokens, and a temperature around 1.3 keeps it creative.
  chat_history_ids = model.generate(
      bot_input_ids,
      min_length=1,
      max_length=50,
      pad_token_id=tokenizer.eos_token_id,
      no_repeat_ngram_size=3,
      do_sample=True,
      top_k=50,
      top_p=0.9,
      temperature=temperature
  )

  out_str = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
  return out_str
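
## A minimal sketch (not used by this demo) of how a short history could be
## folded into the prompt with alternating speaker tokens, as mentioned in the
## comments inside get_chat_response. The exact turn/token layout the model
## expects is an assumption here; see metayazar.com/chatbot for the full
## history-aware implementation.
def build_input_with_history(name, history, input_txt):
  # history: list of (user_text, bot_text) pairs, oldest first (assumed format)
  prompt = tokenizer.bos_token + "My name is " + name
  for user_text, bot_text in history:
    prompt += "<speaker1>" + user_text + "<speaker2>" + bot_text
  prompt += "<speaker1>" + input_txt + tokenizer.eos_token
  return tokenizer.encode(prompt, return_tensors='pt')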

## You can use any of the character names below
'''
| Macleod | Moran | Brenda | Ramirez | Peter Parker | Quentin Beck | Andy 
| Red | Norton | Willard | Chief | Chef | Kilgore | Kurtz | Westley | Buttercup 
| Vizzini | Fezzik | Inigo | Man In Black | Taylor | Zira | Zaius | Cornelius 
| Bud | Lindsey | Hippy | Erin | Ed | George | Donna | Trinity | Agent Smith 
| Morpheus | Neo | Tank | Meryl | Truman | Marlon | Christof | Stromboli | Bumstead 
| Schreber | Walker | Korben | Cornelius | Loc Rhod | Anakin | Obi-Wan | Palpatine 
| Padme | Superman | Luthor | Dude | Walter | Donny | Maude | General | Starkiller 
| Indiana | Willie | Short Round | John | Sarah | Terminator | Miller | Sarge | Reiben 
| Jackson | Upham | Chuckie | Will | Lambeau | Sean | Skylar | Saavik | Spock 
| Kirk | Bones | Khan | Kirk | Spock | Sybok | Scotty | Bourne | Pamela | Abbott 
| Nicky | Marshall | Korshunov | Troy | Vig | Archie Gates | Doc | Interrogator 
| Ellie | Ted | Peter | Drumlin | Joss | Macready | Childs | Nicholas | Conrad 
| Feingold | Christine | Adam | Barbara | Delia | Lydia | Cathy | Charles | Otho 
| Schaefer | Han | Luke | Leia | Threepio | Vader | Yoda | Lando | Elaine | Striker 
| Dr. Rumack | Kramer | David | Saavik | Kirk | Kruge | Holden | Deckard | Rachael 
| Batty | Sebastian | Sam | Frodo | Pippin | Gandalf | Kay | Edwards | Laurel 
| Edgar | Zed | Jay | Malloy | Plissken | Steve Rogers | Tony Stark | Scott Lang 
| Bruce Banner | Bruce | Edward | Two-Face | Batman | Chase | Alfred | Dick 
| Riddler | Din Djarin | Greef Karga | Kuiil | Ig-11 | Cara Dune | Peli Motto 
| Toro Calican | Ripley | Meredith | Dickie | Marge | Peter | Lambert | Kane 
| Dallas | Ripley | Ash | Parker | Threepio | Luke | Leia | Ben | Han | Common Bob 
| Common Alice | Jack | Tyler | Marla | Dana | Stantz | Venkman | Spengler | Louis 
| Fry | Johns | Riddick | Kirk | Decker | Spock | "Ilia | Indy | Belloq | Marion 
| Brother | Allnut | Rose | Qui-Gon | Jar Jar
'''

# A few selected characters are included for demo use
personality_choices = ["Gandalf", "Riddick", "Macleod", "Morpheus", "Neo", "Spock", "Vader", "Indy", "Ig-11", "Threepio", "Tony Stark", "Batman", "Vizzini"]

examples = [["Gandalf", "What is your name?"]]

# History is not implemented in this demo; use metayazar.com/chatbot for a movie and character dropdown chat interface
interface = gr.Interface(fn=get_chat_response, inputs=[gr.inputs.Dropdown(personality_choices), "text"], outputs="text", examples=examples)
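
# Quick local check without the web UI (illustrative only):
# print(get_chat_response("Gandalf", "What is your name?"))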


if __name__ == "__main__":
    interface.launch()