crscardellino committed
Commit cad4540
1 Parent(s): b5e125d

Build a basic CLI app for the chatbot. Improve the way to read the prompt.

Files changed (3)
  1. .gitignore +2 -0
  2. model.py → chatbot.py +112 -42
  3. prompt.txt +6 -0
.gitignore CHANGED
@@ -158,3 +158,5 @@ cython_debug/
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 #.idea/
+
+.vscode/
model.py → chatbot.py RENAMED
@@ -1,30 +1,41 @@
+"""
+Chatbot based on the notebook [How to generate text: using different decoding
+methods for language generation with
+Transformers](https://github.com/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)
+and the blog post [Create conversational agents using BLOOM:
+Part-1](https://medium.com/@fractal.ai/create-conversational-agents-using-bloom-part-1-63a66e6321c0).
+
+This code needs testing, as it is not fit for a production model.
+
+It's a very basic chatbot that uses Causal Language Models from Transformers
+given a PROMPT.
+
+An example of a basic PROMPT is given in the file `prompt.txt` for a Spanish
+prompt.
+"""
+
+import argparse
+import torch
+
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from typing import Optional, Union
 
 
 class ChatBot:
     """
-    Chatbot based on the notebook [How to generate text: using different
-    decoding methods for language generation with
-    Transformers](https://github.com/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)
-    and the blog post [Create conversational agents using BLOOM:
-    Part-1](https://medium.com/@fractal.ai/create-conversational-agents-using-bloom-part-1-63a66e6321c0).
-
-    This code needs testing, as it is not fitted for a production model.
-
-    It's a very basic chatbot that uses Causal Language Models from Transformers given an PROMPT.
-
-    An example of a basic PROMPT is given by the BASE_PROMPT attribute.
+    Main class wrapper around the transformers models in order to build a basic
+    chatbot application.
 
     Parameters
     ----------
     base_model : str | AutoModelForCausalLM
         A name (path in hugging face hub) for a model, or the model itself.
     tokenizer : AutoTokenizer | None
-        Needed in case the base_model is a given model, otherwise it will load the same model
-        given by the base_model path.
+        Needed in case the base_model is a given model, otherwise it will load
+        the same model given by the base_model path.
     initial_prompt : str
-        A prompt for the model. Should follow the example given in `BASE_PROMPT`
+        A prompt for the model. Should follow the example given in the file
+        `prompt.txt`.
     keep_context : bool
         Whether to accumulate the context as the chatbot is used.
     creative : bool
@@ -34,23 +45,13 @@ class ChatBot:
     max_tokens : int
         Max number of tokens to generate in the chat.
     human_identifier : str
-        The string that will identify the human speaker in the prompt (e.g. HUMAN).
+        The string that will identify the human speaker in the prompt (e.g.
+        HUMAN).
     bot_identifier : str
-        The string that will identify the bot speaker in the prompt (e.g. EXPERT).
+        The string that will identify the bot speaker in the prompt (e.g.
+        EXPERT).
     """
 
-    BASE_PROMPT = """
-    The following is a conversation with a movie EXPERT.
-    The EXPERT helps the HUMAN define their personal preferences and provide
-    multiple options to select from, it also helps in selecting the best option.
-    The EXPERT is conversational, optimistic, flexible, empathetic, creative and
-    humanly in generating responses.
-
-    HUMAN: Hello, how are you?
-    EXPERT: Fine, thanks. I am here to help you by recommending movies.
-    """.strip()
-
-
     def __init__(self,
                  base_model: Union[str, AutoModelForCausalLM],
                  tokenizer: Optional[AutoTokenizer] = None,
@@ -58,8 +59,8 @@ class ChatBot:
                  keep_context: bool = False,
                  creative: bool = False,
                  max_tokens: int = 50,
-                 human_identifier: str = "HUMAN",
-                 bot_identifier: str = "EXPERT"):
+                 human_identifier: str = 'HUMAN',
+                 bot_identifier: str = 'EXPERT'):
         if isinstance(base_model, str):
             self.model = AutoModelForCausalLM.from_pretrained(
                 base_model,
@@ -73,7 +74,12 @@ class ChatBot:
             self.model = base_model
             self.tokenizer = tokenizer
 
-        self.initial_prompt = initial_prompt if initial_prompt is not None else self.BASE_PROMPT
+        if initial_prompt is None:
+            with open('./prompt.txt', 'r') as fh:
+                self.initial_prompt = fh.read()
+        else:
+            self.initial_prompt = initial_prompt
+
         self.keep_context = keep_context
         self.context = ''
         self.creative = creative
@@ -85,11 +91,12 @@ class ChatBot:
         """
         Generates a response from the prompt (and optionally the context) where
         it adds the `input_text` as if it was part of the HUMAN dialog
-        (identified by `self.human_identifier`), and prompts the bot (identified
-        by `self.bot_identifier`) for a response. As the bot might continue the
-        conversation beyond the scope, it trims the output so it only shows the
-        first dialog given by the bot, following the idea presented in the
-        Medium blog post for creating conversational agents (link above).
+        (identified by `self.human_identifier`), and prompts the bot
+        (identified by `self.bot_identifier`) for a response. As the bot might
+        continue the conversation beyond the scope, it trims the output so it
+        only shows the first dialog given by the bot, following the idea
+        presented in the Medium blog post for creating conversational agents
+        (link above).
 
         Parameters
         ----------
@@ -101,31 +108,94 @@ class ChatBot:
         str
             The output given by the bot, trimmed for better control.
         """
+        # Setup the prompt given the initial prompt and add the words that
+        # start the dialog between the human and the bot. Give space for the
+        # model to continue from the prompt
         prompt = self.initial_prompt + self.context
         prompt += f'{self.human_identifier}: {input_text}\n'
-        prompt += f'{self.bot_identifier}: '
+        prompt += f'{self.bot_identifier}: '  # check the space after the colon
 
         input_ids = self.tokenizer.encode(prompt, return_tensors='pt')
         if self.creative:
+            # In case you want the bot to be creative, we sample using `top_k`
+            # and `top_p`
             output = self.model.generate(
                 input_ids,
                 do_sample=True,
                 max_length=input_ids.shape[1] + self.max_tokens,
                 top_k=50,
-                top_p=0.95,
-                num_return_sequences=1
+                top_p=0.95
             )[0]
         else:
+            # Otherwise we return the most probable token
             output = self.model.generate(
                 input_ids,
                 max_length=input_ids.shape[1] + self.max_tokens
             )[0]
 
+        # Decode the output, removing special tokens for the model (like
+        # `[CLS]` and similar)
         decoded_output = self.tokenizer.decode(output, skip_special_tokens=True)
+
+        # Trim the output, first by removing the original prompt
         trimmed_output = decoded_output[len(prompt):]
+
+        # Then we find the stop token, in this case the human identifier, and
+        # we get up to that point
         trimmed_output = trimmed_output[:trimmed_output.find(f'{self.human_identifier}:')]
 
         if self.keep_context:
-            self.context += trimmed_output
-
-        return trimmed_output.strip()
+            # If we want to keep the context of the conversation we append
+            # only the new exchange (the prompt already holds the old context)
+            self.context += prompt[len(self.initial_prompt) + len(self.context):] + trimmed_output
+
+        return trimmed_output.strip()  # we only return the trimmed output
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model-name', '-m',
+                        default='bigscience/bloom-560m',
+                        help="Name of the base model to use for the chatbot")
+    parser.add_argument('--prompt', '-p',
+                        default='./prompt.txt',
+                        help="Path to the file with the prompt to use")
+    parser.add_argument('--keep-context', '-k',
+                        action='store_true',
+                        help="Keep context of the conversation.")
+    parser.add_argument('--creative', '-c',
+                        action='store_true',
+                        help="Make the bot creative when answering.")
+    parser.add_argument('--random-seed', '-r',
+                        default=42,
+                        help="Seed number for the creative bot.",
+                        type=int)
+    parser.add_argument('--human-identifier', '-i',
+                        default='HUMANO',
+                        help="Name of the human identifier.")
+    parser.add_argument('--bot-identifier', '-b',
+                        default='EXPERTO',
+                        help="Name of the bot identifier.")
+
+    args = parser.parse_args()
+
+    torch.manual_seed(args.random_seed)
+
+    with open(args.prompt, 'r') as fh:
+        initial_prompt = fh.read()
+
+    chatbot = ChatBot(
+        base_model=args.model_name,
+        initial_prompt=initial_prompt,
+        keep_context=args.keep_context,
+        creative=args.creative,
+        human_identifier=args.human_identifier,
+        bot_identifier=args.bot_identifier
+    )
+
+    print("Write `exit` or `quit` to quit")
+    while True:
+        input_text = input('> ')
+        if input_text == 'exit' or input_text == 'quit':
+            break
+        print(chatbot.chat(input_text))
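The new `__main__` block turns the module into a small CLI app: for example, `python chatbot.py --creative --keep-context` starts an interactive session with the defaults above. For programmatic use, a minimal sketch follows (the English prompt and the question are illustrative assumptions; any text following the `IDENTIFIER: utterance` convention should work):

    from chatbot import ChatBot

    # Hypothetical English prompt following the same convention as prompt.txt
    prompt = (
        'The following is a conversation between a HUMAN and a bot EXPERT '
        'on free software.\n\n'
        'HUMAN: Hello, how are you?\n'
        'EXPERT: Hi, very well. I am here to help you with free software.\n'
    )

    bot = ChatBot(
        base_model='bigscience/bloom-560m',  # the CLI's default model
        initial_prompt=prompt,
        keep_context=True,
        human_identifier='HUMAN',
        bot_identifier='EXPERT'
    )
    print(bot.chat('What is free software?'))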
prompt.txt ADDED
@@ -0,0 +1,6 @@
+La siguiente es una conversación entre un HUMANO y un bot EXPERTO en software libre.
+El EXPERTO le ayuda al HUMANO con preguntas acerca de software libre.
+El EXPERTO es conversacional, optimista, flexible, creativo y genera respuestas parecidas a un humano.
+
+HUMANO: Hola, ¿Cómo estás?
+EXPERTO: Hola, muy bien. Estoy acá para ayudarte con preguntas respecto al software libre.
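In English, the added prompt reads: "The following is a conversation between a HUMAN and a bot EXPERT on free software. The EXPERT helps the HUMAN with questions about free software. The EXPERT is conversational, optimistic, flexible, creative and generates human-like answers. HUMAN: Hi, how are you? EXPERT: Hi, very well. I am here to help you with questions about free software." The HUMANO/EXPERTO identifiers match the CLI defaults for `--human-identifier` and `--bot-identifier`, and the file sits at the default `--prompt` path, so `python chatbot.py` picks it up unchanged.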