martinjosifoski committed
Commit d2173fe
1 parent: 6542487

Major overhaul to the OpenAIChatAtomicFlow.

Files changed (2):
  1. OpenAIChatAtomicFlow.py +131 -157
  2. OpenAIChatAtomicFlow.yaml +30 -1
OpenAIChatAtomicFlow.py CHANGED
@@ -1,9 +1,7 @@
-import pprint
 from copy import deepcopy
 
 import hydra
 
-import colorama
 import time
 
 from typing import List, Dict, Optional, Any
@@ -12,31 +10,32 @@ from langchain import PromptTemplate
 import langchain
 from langchain.schema import HumanMessage, AIMessage, SystemMessage
 
-from flows.history import FlowHistory
 from flows.message_annotators.abstract import MessageAnnotator
 from flows.base_flows.abstract import AtomicFlow
 from flows.datasets import GenericDemonstrationsDataset
 
 from flows import utils
-from flows.messages.chat_message import ChatMessage
+from flows.messages.flow_message import UpdateMessage_ChatMessage
 from flows.utils.caching_utils import flow_run_cache
+from flows.utils.general_helpers import validate_parameters
 
 log = utils.get_pylogger(__name__)
 
+# ToDo: Add support for demonstrations
+
 
 class OpenAIChatAtomicFlow(AtomicFlow):
-    model_name: str
-    generation_parameters: Dict
+    REQUIRED_KEYS_CONFIG = ["model_name", "generation_parameters"]
+    REQUIRED_KEYS_KWARGS = ["system_message_prompt_template",
+                            "human_message_prompt_template",
+                            "query_message_prompt_template"]
 
-    system_message_prompt_template: PromptTemplate
-    human_message_prompt_template: PromptTemplate
+    SUPPORTS_CACHING: bool = True
 
-    system_name: str = "system"
-    user_name: str = "user"
-    assistant_name: str = "assistant"
+    api_keys: Dict[str, str]
 
-    n_api_retries: int = 6
-    wait_time_between_retries: int = 20
+    system_message_prompt_template: PromptTemplate
+    human_message_prompt_template: PromptTemplate
 
     query_message_prompt_template: Optional[PromptTemplate] = None
     demonstrations: GenericDemonstrationsDataset = None
@@ -44,9 +43,10 @@ class OpenAIChatAtomicFlow(AtomicFlow):
     response_annotators: Optional[Dict[str, MessageAnnotator]] = {}
 
     def __init__(self, **kwargs):
-        self._validate_parameters(kwargs)
         super().__init__(**kwargs)
 
+        self.api_keys = None
+
         assert self.flow_config["name"] not in [
             "system",
             "user",
@@ -55,29 +55,11 @@ class OpenAIChatAtomicFlow(AtomicFlow):
 
     def set_up_flow_state(self):
         super().set_up_flow_state()
-        self.flow_state["conversation_initialized"] = False
+        self.flow_state["previous_messages"] = []
 
     @classmethod
     def _validate_parameters(cls, kwargs):
-        # ToDo: Deal with this in a cleaner way (with less repetition)
-        super()._validate_parameters(kwargs)
-
-        # ~~~ Model generation ~~~
-        if "model_name" not in kwargs["flow_config"]:
-            raise KeyError("model_name not specified in the flow_config.")
-
-        if "generation_parameters" not in kwargs["flow_config"]:
-            raise KeyError("generation_parameters not specified in the flow_config.")
-
-        # ~~~ Prompting ~~~
-        if "system_message_prompt_template" not in kwargs:
-            raise KeyError("system_message_prompt_template not passed to the constructor.")
-
-        if "query_message_prompt_template" not in kwargs:
-            raise KeyError("query_message_prompt_template not passed to the constructor.")
-
-        if "human_message_prompt_template" not in kwargs:
-            raise KeyError("human_message_prompt_template not passed to the constructor.")
+        validate_parameters(cls, kwargs)
 
     @classmethod
     def _set_up_prompts(cls, config):
@@ -92,19 +74,20 @@ class OpenAIChatAtomicFlow(AtomicFlow):
 
         return kwargs
 
-    @classmethod
-    def _set_up_demonstration_templates(cls, config):
-        kwargs = {}
-
-        if "demonstrations_response_template" in config:
-            kwargs["demonstrations_response_template"] = \
-                hydra.utils.instantiate(config['demonstrations_response_template'], _convert_="partial")
-
-        return kwargs
+    # @classmethod
+    # def _set_up_demonstration_templates(cls, config):
+    #     kwargs = {}
+    #
+    #     if "demonstrations_response_template" in config:
+    #         kwargs["demonstrations_response_template"] = \
+    #             hydra.utils.instantiate(config['demonstrations_response_template'], _convert_="partial")
+    #
+    #     return kwargs
 
     @classmethod
     def _set_up_response_annotators(cls, config):
         response_annotators = config.get("response_annotators", {})
+        response_annotators = deepcopy(response_annotators)
         if len(response_annotators) > 0:
             for key, config in response_annotators.items():
                 response_annotators[key] = hydra.utils.instantiate(config, _convert_="partial")
@@ -119,8 +102,8 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         # ~~~ Set up prompts ~~~
         kwargs.update(cls._set_up_prompts(flow_config))
 
-        # ~~~ Set up demonstration templates ~~~
-        kwargs.update(cls._set_up_demonstration_templates(flow_config))
+        # # ~~~ Set up demonstration templates ~~~
+        # kwargs.update(cls._set_up_demonstration_templates(flow_config))
 
         # ~~~ Set up response annotators ~~~
         kwargs.update(cls._set_up_response_annotators(flow_config))
@@ -129,9 +112,13 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         return cls(**kwargs)
 
     def _is_conversation_initialized(self):
-        return self.flow_state["conversation_initialized"]
+        if len(self.flow_state["previous_messages"]) > 0:
+            return True
+
+        return False
 
-    def expected_inputs_given_state(self):
+    def get_expected_inputs(self, data: Optional[Dict[str, Any]] = None):
+        """Returns the expected inputs for the flow given the current state and, optionally, the input data"""
         if self._is_conversation_initialized():
             return ["query"]
         else:
@@ -146,13 +133,13 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         msg_content = prompt_template.format(**template_kwargs)
         return msg_content
 
-    def _get_demonstration_query_message_content(self, sample_data: Dict):
-        input_variables = self.query_message_prompt_template.input_variables
-        return self.query_message_prompt_template.format(**{k: sample_data[k] for k in input_variables}), []
-
-    def _get_demonstration_response_message_content(self, sample_data: Dict):
-        input_variables = self.demonstrations_response_template.input_variables
-        return self.demonstrations_response_template.format(**{k: sample_data[k] for k in input_variables}), []
+    # def _get_demonstration_query_message_content(self, sample_data: Dict):
+    #     input_variables = self.query_message_prompt_template.input_variables
+    #     return self.query_message_prompt_template.format(**{k: sample_data[k] for k in input_variables}), []
+    #
+    # def _get_demonstration_response_message_content(self, sample_data: Dict):
+    #     input_variables = self.demonstrations_response_template.input_variables
+    #     return self.demonstrations_response_template.format(**{k: sample_data[k] for k in input_variables}), []
 
     def _get_annotator_with_key(self, key: str):
         for _, ra in self.response_annotators.items():
@@ -162,75 +149,65 @@ class OpenAIChatAtomicFlow(AtomicFlow):
     def _response_parsing(self, response: str, expected_outputs: List[str]):
         target_annotators = [ra for _, ra in self.response_annotators.items() if ra.key in expected_outputs]
 
-        if len(target_annotators) == 0:
-            return {expected_outputs[0]: response}
-
         parsed_outputs = {}
         for ra in target_annotators:
             parsed_out = ra(response)
             parsed_outputs.update(parsed_out)
-        return parsed_outputs
-
-    def _add_demonstrations(self):
-        if self.demonstrations is not None:
-            for example in self.demonstrations:
-                query, parents = self._get_demonstration_query_message_content(example)
-                response, parents = self._get_demonstration_response_message_content(example)
-
-                self._log_chat_message(content=query,
-                                       message_creator=self.user_name,
-                                       parent_message_ids=parents)
-
-                self._log_chat_message(content=response,
-                                       message_creator=self.assistant_name,
-                                       parent_message_ids=parents)
-
-    def _log_chat_message(self, message_creator: str, content: str, parent_message_ids: List[str] = None):
-        chat_message = ChatMessage(
-            message_creator=message_creator,
-            parent_message_ids=parent_message_ids,
-            flow_runner=self.flow_config["name"],
-            flow_run_id=self.flow_run_id,
-            content=content
-        )
-        return self._log_message(chat_message)
 
-    def _initialize_conversation(self, input_data: Dict[str, Any]):
-        # ~~~ Add the system message ~~~
-        system_message_content = self._get_message(self.system_message_prompt_template, input_data)
-
-        self._log_chat_message(content=system_message_content,
-                               message_creator=self.system_name)
-
-        # ~~~ Add the demonstration query-response tuples (if any) ~~~
-        self._add_demonstrations()
-        self._update_state(update_data={"conversation_initialized": True})
-
-    def get_conversation_messages(self, message_format: Optional[str] = None):
-        messages = self.flow_state["history"].get_chat_messages()
+        if "raw_response" in expected_outputs:
+            parsed_outputs["raw_response"] = response
+        else:
+            log.warning("The raw response is not logged because it was not requested as per the expected output.")
 
-        if message_format is None:
-            return messages
+        if len(parsed_outputs) == 0:
+            raise Exception(f"The output dictionary is empty. "
+                            f"None of the expected outputs: `{str(expected_outputs)}` were found.")
 
-        elif message_format == "open_ai":
-            processed_messages = []
+        return parsed_outputs
 
-            for message in messages:
-                if message.message_creator == self.system_name:
-                    processed_messages.append(SystemMessage(content=message.content))
-                elif message.message_creator == self.assistant_name:
-                    processed_messages.append(AIMessage(content=message.content))
-                elif message.message_creator == self.user_name:
-                    processed_messages.append(HumanMessage(content=message.content))
-                else:
-                    raise ValueError(f"Unknown name: {message.message_creator}")
-            return processed_messages
+    # def _add_demonstrations(self):
+    #     if self.demonstrations is not None:
+    #         for example in self.demonstrations:
+    #             query, parents = self._get_demonstration_query_message_content(example)
+    #             response, parents = self._get_demonstration_response_message_content(example)
+    #
+    #             self._log_chat_message(content=query,
+    #                                    role=self.user_name,
+    #                                    parent_message_ids=parents)
+    #
+    #             self._log_chat_message(content=response,
+    #                                    role=self.assistant_name,
+    #                                    parent_message_ids=parents)
+
+    def _state_update_add_chat_message(self,
+                                       role: str,
+                                       content: str) -> None:
+
+        # Add the message to the previous messages list
+        if role == self.flow_config["system_name"]:
+            self.flow_state["previous_messages"].append(SystemMessage(content=content))
+        elif role == self.flow_config["user_name"]:
+            self.flow_state["previous_messages"].append(HumanMessage(content=content))
+        elif role == self.flow_config["assistant_name"]:
+            self.flow_state["previous_messages"].append(AIMessage(content=content))
         else:
-            raise ValueError(
-                f"Currently supported conversation message formats: 'open_ai'. '{message_format}' is not supported")
+            raise Exception(f"Invalid role: `{role}`.\n"
+                            f"Role should be one of: "
+                            f"`{self.flow_config['system_name']}`, "
+                            f"`{self.flow_config['user_name']}`, "
+                            f"`{self.flow_config['assistant_name']}`")
+
+        # Log the update to the flow messages list
+        chat_message = UpdateMessage_ChatMessage(
+            created_by=self.flow_config["name"],
+            updated_flow=self.flow_config["name"],
+            role=role,
+            content=content,
+        )
+        self._log_message(chat_message)
 
     def _call(self):
-        api_key = self.flow_state["api_key"]
+        api_key = self.api_keys["openai"]
 
         backend = langchain.chat_models.ChatOpenAI(
             model_name=self.flow_config["model_name"],
@@ -238,15 +215,13 @@ class OpenAIChatAtomicFlow(AtomicFlow):
             **self.flow_config["generation_parameters"],
         )
 
-        messages = self.get_conversation_messages(
-            message_format="open_ai"
-        )
+        messages = self.flow_state["previous_messages"]
 
        _success = False
        attempts = 1
        error = None
        response = None
-        while attempts <= self.n_api_retries:
+        while attempts <= self.flow_config['n_api_retries']:
            try:
                response = backend(messages).content
                _success = True
@@ -254,70 +229,69 @@ class OpenAIChatAtomicFlow(AtomicFlow):
             except Exception as e:
                 log.error(
                     f"Error {attempts} in calling backend: {e}. Key used: `{api_key}`. "
-                    f"Retrying in {self.wait_time_between_retries} seconds..."
-                )
-                log.error(
-                    f"API call raised Exception with the following arguments arguments: "
-                    f"\n{self.flow_state['history'].to_string()}"
+                    f"Retrying in {self.flow_config['wait_time_between_retries']} seconds..."
                 )
+                # log.error(
+                #     f"The API call raised an exception with the following arguments: "
+                #     f"\n{self.flow_state['history'].to_string()}"
+                # )  # ToDo: Make this message more user-friendly
                 attempts += 1
-                time.sleep(self.wait_time_between_retries)
+                time.sleep(self.flow_config['wait_time_between_retries'])
                 error = e
 
         if not _success:
             raise error
 
-        if self.flow_config["verbose"]:
-            messages_str = self.flow_state["history"].to_string()
-            log.info(
-                f"\n{colorama.Fore.MAGENTA}~~~ History [{self.flow_config['name']}] ~~~\n"
-                f"{colorama.Style.RESET_ALL}{messages_str}"
-            )
-
         return response
 
-    def _prepare_conversation(self, input_data: Dict[str, Any]):
+    def _initialize_conversation(self, input_data: Dict[str, Any]):
+        # ~~~ Add the system message ~~~
+        system_message_content = self._get_message(self.system_message_prompt_template, input_data)
+
+        self._state_update_add_chat_message(content=system_message_content,
+                                            role=self.flow_config["system_name"])
+
+        # # ~~~ Add the demonstration query-response tuples (if any) ~~~
+        # self._add_demonstrations()
+        # self._update_state(update_data={"conversation_initialized": True})
+
+    def _process_input(self, input_data: Dict[str, Any]):
         if self._is_conversation_initialized():
-            # ~~~ Check that the message has a `query` field ~~~
-            user_message_content = self.human_message_prompt_template.format(query=input_data["query"])
+            # Construct the message using the human message prompt template
+            user_message_content = self._get_message(self.human_message_prompt_template, input_data)
 
         else:
+            # Initialize the conversation (add the system message, and potentially the demonstrations)
             self._initialize_conversation(input_data)
+            # Construct the message using the query message prompt template
             user_message_content = self._get_message(self.query_message_prompt_template, input_data)
 
-        self._log_chat_message(message_creator=self.user_name,
-                               content=user_message_content)
+        self._state_update_add_chat_message(role=self.flow_config["user_name"],
+                                            content=user_message_content)
 
     @flow_run_cache()
-    def run(self, input_data: Dict[str, Any], expected_outputs: List[str]) -> Dict[str, Any]:
-        # ~~~ Chat-specific preparation ~~~
-        self._prepare_conversation(input_data)
+    def run(self,
+            input_data: Dict[str, Any],
+            private_keys: Optional[List[str]] = [],
+            keys_to_ignore_for_hash: Optional[List[str]] = []) -> Dict[str, Any]:
+        self.api_keys = input_data["api_keys"]
+        del input_data["api_keys"]
+
+        # ~~~ Process input ~~~
+        self._process_input(input_data)
 
         # ~~~ Call ~~~
         response = self._call()
-        answer_message = self._log_chat_message(
-            message_creator=self.flow_config["assistant_name"],
+        self._state_update_add_chat_message(
+            role=self.flow_config["assistant_name"],
             content=response
         )
 
         # ~~~ Response parsing ~~~
-        parsed_outputs = self._response_parsing(
+        output_data = self._response_parsing(
             response=response,
-            expected_outputs=expected_outputs
+            expected_outputs=input_data["expected_outputs"]
         )
-        self._update_state(update_data=parsed_outputs)
-
-        if self.flow_config["verbose"]:
-            parsed_output_messages_str = pprint.pformat({k: m for k, m in parsed_outputs.items()},
-                                                        indent=4)
-            log.info(
-                f"\n{colorama.Fore.MAGENTA}~~~ "
-                f"Response [{answer_message.message_creator} -- "
-                f"{answer_message.message_id} -- "
-                f"{answer_message.flow_run_id}] ~~~"
-                f"\n{colorama.Fore.YELLOW}Content: {answer_message}{colorama.Style.RESET_ALL}"
-                f"\n{colorama.Fore.YELLOW}Parsed Outputs: {parsed_output_messages_str}{colorama.Style.RESET_ALL}"
-            )
-
-        # ~~~ The final answer should be in self.flow_state, thus allow_class_namespace=False ~~~
-        return self._get_keys_from_state(keys=expected_outputs, allow_class_namespace=False)
+        # self._state_update_dict(update_data=output_data)  # ToDo: Is this necessary? When?
+
+        return output_data
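
For context, a minimal sketch of how the reworked interface is driven after this change. The flow instance and the concrete query are hypothetical; only the run contract (API keys and expected outputs travel inside input_data, and the parsed outputs come back as a plain dict) is taken from the diff above.

# Hypothetical usage sketch; assumes `flow` is an already-instantiated
# OpenAIChatAtomicFlow built from a concrete config.
input_data = {
    "api_keys": {"openai": "sk-..."},      # consumed (and deleted) by `run`
    "expected_outputs": ["raw_response"],  # `_response_parsing` copies the raw response under this key
    "query": "What is the capital of Switzerland?",  # rendered by query_message_prompt_template
}

output_data = flow.run(input_data=input_data)
print(output_data["raw_response"])

On the first call, _process_input initializes the conversation (system message) before appending the user message; on subsequent calls, only human_message_prompt_template is used.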
OpenAIChatAtomicFlow.yaml CHANGED
@@ -1,5 +1,16 @@
 # This is an abstract flow, therefore some required fields are not defined (and must be defined by the concrete flow)
 
+model_name: "gpt-4"
+generation_parameters:
+  n: 1
+  max_tokens: 3000
+  temperature: 0.3
+
+  model_kwargs:
+    top_p: 0.2
+    frequency_penalty: 0
+    presence_penalty: 0
+
 n_api_retries: 6
 wait_time_between_retries: 20
 
@@ -9,6 +20,24 @@ assistant_name: assistant
 
 response_annotators: {}
 
-query_message_prompt_template: null  # ToDo: When will this be null?
+system_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template_format: jinja2
+
+user_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template_format: jinja2
+
+human_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template_format: jinja2
+
+query_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: "{{query}}"
+  input_variables:
+    - "query"
+  template_format: jinja2
+
 demonstrations: null
 demonstrations_response_template: null
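
For reference, a minimal sketch (not part of the commit) of how a node such as query_message_prompt_template is resolved: hydra.utils.instantiate turns the _target_ entry into a langchain.PromptTemplate, mirroring the instantiation call visible in _set_up_response_annotators and, presumably, in _set_up_prompts.

# Hypothetical standalone example; requires hydra-core, langchain, and jinja2.
import hydra

config = {
    "_target_": "langchain.PromptTemplate",
    "template": "{{query}}",
    "input_variables": ["query"],
    "template_format": "jinja2",
}

# Same call pattern as in the diff above
prompt_template = hydra.utils.instantiate(config, _convert_="partial")
print(prompt_template.format(query="What is the capital of Switzerland?"))

Note that the three abstract template entries deliberately omit template and input_variables; a concrete flow's config is expected to fill those in.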