martinjosifoski committed on
Commit 0949dd4
1 Parent(s): d2173fe

Incorporate feedback on PR.

Files changed (1):
  1. OpenAIChatAtomicFlow.py +7 -7
OpenAIChatAtomicFlow.py CHANGED
@@ -117,12 +117,12 @@ class OpenAIChatAtomicFlow(AtomicFlow):
 
         return False
 
-    def get_expected_inputs(self, data: Optional[Dict[str, Any]] = None):
+    def get_input_keys(self, data: Optional[Dict[str, Any]] = None):
         """Returns the expected inputs for the flow given the current state and, optionally, the input data"""
         if self._is_conversation_initialized():
             return ["query"]
         else:
-            return self.flow_config["expected_inputs"]
+            return self.flow_config["input_keys"]
 
     @staticmethod
     def _get_message(prompt_template, input_data: Dict[str, Any]):
@@ -146,22 +146,22 @@ class OpenAIChatAtomicFlow(AtomicFlow):
             if ra.key == key:
                 return ra
 
-    def _response_parsing(self, response: str, expected_outputs: List[str]):
-        target_annotators = [ra for _, ra in self.response_annotators.items() if ra.key in expected_outputs]
+    def _response_parsing(self, response: str, output_keys: List[str]):
+        target_annotators = [ra for _, ra in self.response_annotators.items() if ra.key in output_keys]
 
         parsed_outputs = {}
         for ra in target_annotators:
             parsed_out = ra(response)
             parsed_outputs.update(parsed_out)
 
-        if "raw_response" in expected_outputs:
+        if "raw_response" in output_keys:
             parsed_outputs["raw_response"] = response
         else:
             log.warning("The raw response is not logged because it was not requested as per the expected output.")
 
         if len(parsed_outputs) == 0:
             raise Exception(f"The output dictionary is empty. "
-                            f"None of the expected outputs: `{str(expected_outputs)}` were found.")
+                            f"None of the expected outputs: `{str(output_keys)}` were found.")
 
         return parsed_outputs
 
@@ -290,7 +290,7 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         # ~~~ Response parsing ~~~
         output_data = self._response_parsing(
             response=response,
-            expected_outputs=input_data["expected_outputs"]
+            output_keys=input_data["output_keys"]
         )
         # self._state_update_dict(update_data=output_data) # ToDo: Is this necessary? When?
 
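For reference, below is a minimal, self-contained sketch of the parsing behaviour touched by this rename. The free-standing response_parsing function and the KeyExtractionAnnotator class are illustrative stand-ins (not part of the repository) that mirror how output_keys filters the flow's response_annotators and gates the raw_response pass-through.

from typing import Any, Dict, List


class KeyExtractionAnnotator:
    # Toy stand-in for a response annotator: it exposes a `key` and returns
    # the (stripped) response under that key when called.
    def __init__(self, key: str):
        self.key = key

    def __call__(self, response: str) -> Dict[str, Any]:
        return {self.key: response.strip()}


def response_parsing(response: str,
                     output_keys: List[str],
                     response_annotators: Dict[str, KeyExtractionAnnotator]) -> Dict[str, Any]:
    # Keep only the annotators whose key was requested via output_keys.
    target_annotators = [ra for _, ra in response_annotators.items() if ra.key in output_keys]

    parsed_outputs: Dict[str, Any] = {}
    for ra in target_annotators:
        parsed_outputs.update(ra(response))

    # The raw response is only passed through when it is explicitly requested.
    if "raw_response" in output_keys:
        parsed_outputs["raw_response"] = response

    if len(parsed_outputs) == 0:
        raise Exception(f"The output dictionary is empty. "
                        f"None of the expected outputs: `{output_keys}` were found.")

    return parsed_outputs


if __name__ == "__main__":
    annotators = {"answer": KeyExtractionAnnotator("answer")}
    print(response_parsing("42", output_keys=["answer", "raw_response"], response_annotators=annotators))
    # -> {'answer': '42', 'raw_response': '42'}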