nbaldwin commited on
Commit
297c90d
1 Parent(s): a749ad1

renamed flows to aiflows

Browse files
Files changed (5) hide show
  1. ChatAtomicFlow.py +12 -12
  2. ChatAtomicFlow.yaml +5 -5
  3. README.md +7 -7
  4. demo.yaml +7 -7
  5. run.py +7 -7
ChatAtomicFlow.py CHANGED
@@ -5,14 +5,14 @@ import time
5
 
6
  from typing import Dict, Optional, Any
7
 
8
- from flows.base_flows import AtomicFlow
9
 
10
- from flows.utils import logging
11
- from flows.messages.flow_message import UpdateMessage_ChatMessage
12
 
13
- from flows.prompt_template import JinjaPrompt
14
 
15
- from flows.backends.llm_lite import LiteLLMBackend
16
 
17
  log = logging.get_logger(__name__)
18
 
@@ -34,7 +34,7 @@ class ChatAtomicFlow(AtomicFlow):
34
  - `assistant_name` (str): The name of the assistant (roles of LLM). Default: "assistant"
35
  - `backend` Dict[str,Any]: The backend of the flow. Used to call models via an API.
36
  See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers.
37
- The default parameters of the backend are all defined at flows.backends.llm_lite.LiteLLMBackend
38
  (also see the default parameters of litellm's completion parameters: https://docs.litellm.ai/docs/completion/input#input-params-1).
39
  Except for the following parameters which are overwritten by the ChatAtomicFlow in ChatAtomicFlow.yaml:
40
  - `model_name` (Union[Dict[str,str],str]): The name of the model to use.
@@ -50,15 +50,15 @@ class ChatAtomicFlow(AtomicFlow):
50
  - `presence_penalty` (number): It is used to penalize new tokens based on their existence in the text so far. Default: 0.0
51
  - `stream` (bool): Whether to stream the response or not. Default: True
52
  - `system_message_prompt_template` (Dict[str,Any]): The template of the system message. It is used to generate the system message.
53
- By default its of type flows.prompt_template.JinjaPrompt.
54
  None of the parameters of the prompt are defined by default and therefore need to be defined if one wants to use the system prompt.
55
- Default parameters are defined in flows.prompt_template.jinja2_prompts.JinjaPrompt.
56
  - `init_human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message used to initialize the conversation
57
  (first time in). It is used to generate the human message. It's passed as the user message to the LLM.
58
- By default its of type flows.prompt_template.JinjaPrompt. None of the parameters of the prompt are defined by default and therefore need to be defined if one
59
- wants to use the init_human_message_prompt_template. Default parameters are defined in flows.prompt_template.jinja2_prompts.JinjaPrompt.
60
  - `human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message (message used every time except the first time in).
61
- It's passed as the user message to the LLM. By default its of type flows.prompt_template.JinjaPrompt and has the following parameters:
62
  - `template` (str): The template of the human message. Default: see ChatAtomicFlow.yaml for the default value.
63
  - `input_variables` (List[str]): The input variables of the human message prompt template. Default: ["query"]
64
  - `previous_messages` (Dict[str,Any]): Defines which previous messages to include in the input of the LLM. Note that if `first_k` and `last_k` are both none,
@@ -87,7 +87,7 @@ class ChatAtomicFlow(AtomicFlow):
87
  :type init_human_message_prompt_template: Optional[JinjaPrompt]
88
  :param backend: The backend of the flow. It is an LLM that is queried via an API. See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers
89
  :type backend: LiteLLMBackend
90
- :param \**kwargs: Additional arguments to pass to the flow. See :class:`flows.base_flows.AtomicFlow` for more details.
91
  """
92
  REQUIRED_KEYS_CONFIG = ["backend"]
93
 
 
5
 
6
  from typing import Dict, Optional, Any
7
 
8
+ from aiflows.base_flows import AtomicFlow
9
 
10
+ from aiflows.utils import logging
11
+ from aiflows.messages.flow_message import UpdateMessage_ChatMessage
12
 
13
+ from aiflows.prompt_template import JinjaPrompt
14
 
15
+ from aiflows.backends.llm_lite import LiteLLMBackend
16
 
17
  log = logging.get_logger(__name__)
18
 
 
34
  - `assistant_name` (str): The name of the assistant (roles of LLM). Default: "assistant"
35
  - `backend` Dict[str,Any]: The backend of the flow. Used to call models via an API.
36
  See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers.
37
+ The default parameters of the backend are all defined at aiflows.backends.llm_lite.LiteLLMBackend
38
  (also see the default parameters of litellm's completion parameters: https://docs.litellm.ai/docs/completion/input#input-params-1).
39
  Except for the following parameters which are overwritten by the ChatAtomicFlow in ChatAtomicFlow.yaml:
40
  - `model_name` (Union[Dict[str,str],str]): The name of the model to use.
 
50
  - `presence_penalty` (number): It is used to penalize new tokens based on their existence in the text so far. Default: 0.0
51
  - `stream` (bool): Whether to stream the response or not. Default: True
52
  - `system_message_prompt_template` (Dict[str,Any]): The template of the system message. It is used to generate the system message.
53
+ By default it's of type aiflows.prompt_template.JinjaPrompt.
54
  None of the parameters of the prompt are defined by default and therefore need to be defined if one wants to use the system prompt.
55
+ Default parameters are defined in aiflows.prompt_template.jinja2_prompts.JinjaPrompt.
56
  - `init_human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message used to initialize the conversation
57
  (first time in). It is used to generate the human message. It's passed as the user message to the LLM.
58
+ By default it's of type aiflows.prompt_template.JinjaPrompt. None of the parameters of the prompt are defined by default and therefore need to be defined if one
59
+ wants to use the init_human_message_prompt_template. Default parameters are defined in aiflows.prompt_template.jinja2_prompts.JinjaPrompt.
60
  - `human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message (message used every time except the first time in).
61
+ It's passed as the user message to the LLM. By default it's of type aiflows.prompt_template.JinjaPrompt and has the following parameters:
62
  - `template` (str): The template of the human message. Default: see ChatAtomicFlow.yaml for the default value.
63
  - `input_variables` (List[str]): The input variables of the human message prompt template. Default: ["query"]
64
  - `previous_messages` (Dict[str,Any]): Defines which previous messages to include in the input of the LLM. Note that if `first_k` and `last_k` are both none,
 
87
  :type init_human_message_prompt_template: Optional[JinjaPrompt]
88
  :param backend: The backend of the flow. It is an LLM that is queried via an API. See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers
89
  :type backend: LiteLLMBackend
90
+ :param \**kwargs: Additional arguments to pass to the flow. See :class:`aiflows.base_flows.AtomicFlow` for more details.
91
  """
92
  REQUIRED_KEYS_CONFIG = ["backend"]
93
 
ChatAtomicFlow.yaml CHANGED
@@ -11,7 +11,7 @@ user_name: user
11
  assistant_name: assistant
12
 
13
  backend:
14
- _target_: flows.backends.llm_lite.LiteLLMBackend
15
  api_infos: ???
16
  model_name: "gpt-3.5-turbo"
17
  n: 1
@@ -26,14 +26,14 @@ backend:
26
 
27
 
28
  system_message_prompt_template:
29
- _target_: flows.prompt_template.JinjaPrompt
30
 
31
 
32
  init_human_message_prompt_template:
33
- _target_: flows.prompt_template.JinjaPrompt
34
 
35
  human_message_prompt_template:
36
- _target_: flows.prompt_template.JinjaPrompt
37
  template: "{{query}}"
38
  input_variables:
39
  - "query"
@@ -41,7 +41,7 @@ input_interface_initialized:
41
  - "query"
42
 
43
  query_message_prompt_template:
44
- _target_: flows.prompt_template.JinjaPrompt
45
 
46
 
47
  previous_messages:
 
11
  assistant_name: assistant
12
 
13
  backend:
14
+ _target_: aiflows.backends.llm_lite.LiteLLMBackend
15
  api_infos: ???
16
  model_name: "gpt-3.5-turbo"
17
  n: 1
 
26
 
27
 
28
  system_message_prompt_template:
29
+ _target_: aiflows.prompt_template.JinjaPrompt
30
 
31
 
32
  init_human_message_prompt_template:
33
+ _target_: aiflows.prompt_template.JinjaPrompt
34
 
35
  human_message_prompt_template:
36
+ _target_: aiflows.prompt_template.JinjaPrompt
37
  template: "{{query}}"
38
  input_variables:
39
  - "query"
 
41
  - "query"
42
 
43
  query_message_prompt_template:
44
+ _target_: aiflows.prompt_template.JinjaPrompt
45
 
46
 
47
  previous_messages:
README.md CHANGED
@@ -40,7 +40,7 @@ Default: "Flow which uses as tool an LLM though an API"
40
  - `assistant_name` (str): The name of the assistant (roles of LLM). Default: "assistant"
41
  - `backend` Dict[str,Any]: The backend of the flow. Used to call models via an API.
42
  See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers.
43
- The default parameters of the backend are all defined at flows.backends.llm_lite.LiteLLMBackend
44
  (also see the default parameters of litellm's completion parameters: https://docs.litellm.ai/docs/completion/input#input-params-1).
45
  Except for the following parameters which are overwritten by the ChatAtomicFlow in ChatAtomicFlow.yaml:
46
  - `model_name` (Union[Dict[str,str],str]): The name of the model to use.
@@ -56,15 +56,15 @@ Except for the following parameters who are overwritten by the ChatAtomicFlow in
56
  - `presence_penalty` (number): It is used to penalize new tokens based on their existence in the text so far. Default: 0.0
57
  - `stream` (bool): Whether to stream the response or not. Default: True
58
  - `system_message_prompt_template` (Dict[str,Any]): The template of the system message. It is used to generate the system message.
59
- By default its of type flows.prompt_template.JinjaPrompt.
60
  None of the parameters of the prompt are defined by default and therefore need to be defined if one wants to use the system prompt.
61
- Default parameters are defined in flows.prompt_template.jinja2_prompts.JinjaPrompt.
62
  - `init_human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message used to initialize the conversation
63
  (first time in). It is used to generate the human message. It's passed as the user message to the LLM.
64
- By default its of type flows.prompt_template.JinjaPrompt. None of the parameters of the prompt are defined by default and therefore need to be defined if one
65
- wants to use the init_human_message_prompt_template. Default parameters are defined in flows.prompt_template.jinja2_prompts.JinjaPrompt.
66
  - `human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message (message used every time except the first time in).
67
- It's passed as the user message to the LLM. By default its of type flows.prompt_template.JinjaPrompt and has the following parameters:
68
  - `template` (str): The template of the human message. Default: see ChatAtomicFlow.yaml for the default value.
69
  - `input_variables` (List[str]): The input variables of the human message prompt template. Default: ["query"]
70
  - `previous_messages` (Dict[str,Any]): Defines which previous messages to include in the input of the LLM. Note that if `first_k` and `last_k` are both none,
@@ -91,7 +91,7 @@ all the messages of the flows's history are added to the input of the LLM. Defau
91
  - `human_message_prompt_template` (`JinjaPrompt`): The template of the human message. It is used to generate the human message.
92
  - `init_human_message_prompt_template` (`Optional[JinjaPrompt]`): The template of the human message that is used to initialize the conversation (first time in). It is used to generate the human message.
93
  - `backend` (`LiteLLMBackend`): The backend of the flow. It is an LLM that is queried via an API. See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers
94
- - `\**kwargs`: Additional arguments to pass to the flow. See :class:`flows.base_flows.AtomicFlow` for more details.
95
 
96
  <a id="ChatAtomicFlow.ChatAtomicFlow.set_up_flow_state"></a>
97
 
 
40
  - `assistant_name` (str): The name of the assistant (roles of LLM). Default: "assistant"
41
  - `backend` Dict[str,Any]: The backend of the flow. Used to call models via an API.
42
  See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers.
43
+ The default parameters of the backend are all defined at aiflows.backends.llm_lite.LiteLLMBackend
44
  (also see the default parameters of litellm's completion parameters: https://docs.litellm.ai/docs/completion/input#input-params-1).
45
  Except for the following parameters which are overwritten by the ChatAtomicFlow in ChatAtomicFlow.yaml:
46
  - `model_name` (Union[Dict[str,str],str]): The name of the model to use.
 
56
  - `presence_penalty` (number): It is used to penalize new tokens based on their existence in the text so far. Default: 0.0
57
  - `stream` (bool): Whether to stream the response or not. Default: True
58
  - `system_message_prompt_template` (Dict[str,Any]): The template of the system message. It is used to generate the system message.
59
+ By default it's of type aiflows.prompt_template.JinjaPrompt.
60
  None of the parameters of the prompt are defined by default and therefore need to be defined if one wants to use the system prompt.
61
+ Default parameters are defined in aiflows.prompt_template.jinja2_prompts.JinjaPrompt.
62
  - `init_human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message used to initialize the conversation
63
  (first time in). It is used to generate the human message. It's passed as the user message to the LLM.
64
+ By default it's of type aiflows.prompt_template.JinjaPrompt. None of the parameters of the prompt are defined by default and therefore need to be defined if one
65
+ wants to use the init_human_message_prompt_template. Default parameters are defined in aiflows.prompt_template.jinja2_prompts.JinjaPrompt.
66
  - `human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message (message used every time except the first time in).
67
+ It's passed as the user message to the LLM. By default it's of type aiflows.prompt_template.JinjaPrompt and has the following parameters:
68
  - `template` (str): The template of the human message. Default: see ChatAtomicFlow.yaml for the default value.
69
  - `input_variables` (List[str]): The input variables of the human message prompt template. Default: ["query"]
70
  - `previous_messages` (Dict[str,Any]): Defines which previous messages to include in the input of the LLM. Note that if `first_k` and `last_k` are both none,
 
91
  - `human_message_prompt_template` (`JinjaPrompt`): The template of the human message. It is used to generate the human message.
92
  - `init_human_message_prompt_template` (`Optional[JinjaPrompt]`): The template of the human message that is used to initialize the conversation (first time in). It is used to generate the human message.
93
  - `backend` (`LiteLLMBackend`): The backend of the flow. It is an LLM that is queried via an API. See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers
94
+ - `\**kwargs`: Additional arguments to pass to the flow. See :class:`aiflows.base_flows.AtomicFlow` for more details.
95
 
96
  <a id="ChatAtomicFlow.ChatAtomicFlow.set_up_flow_state"></a>
97
 
demo.yaml CHANGED
@@ -1,15 +1,15 @@
1
  input_interface: # Connector between the "input data" and the Flow
2
- _target_: flows.interfaces.KeyInterface
3
  additional_transformations:
4
- - _target_: flows.data_transformations.KeyMatchInput # Pass the input parameters specified by the flow
5
 
6
  output_interface: # Connector between the Flow's output and the caller
7
- _target_: flows.interfaces.KeyInterface
8
  keys_to_rename:
9
  api_output: answer # Rename the api_output to answer
10
 
11
  flow: # Overrides the ChatAtomicFlow config
12
- _target_: aiflows.ChatFlowModule.ChatAtomicFlow.instantiate_from_default_config
13
 
14
  name: "SimpleQA_Flow"
15
  description: "A flow that answers questions."
@@ -20,7 +20,7 @@ flow: # Overrides the ChatAtomicFlow config
20
 
21
  # ~~~ backend model parameters ~~
22
  backend:
23
- _target_: flows.backends.llm_lite.LiteLLMBackend
24
  api_infos: ???
25
  model_name:
26
  openai: "gpt-3.5-turbo"
@@ -40,7 +40,7 @@ flow: # Overrides the ChatAtomicFlow config
40
 
41
  # ~~~ Prompt specification ~~~
42
  system_message_prompt_template:
43
- _target_: flows.prompt_template.JinjaPrompt
44
  template: |2-
45
  You are a helpful chatbot that truthfully answers questions.
46
  input_variables: []
@@ -48,7 +48,7 @@ flow: # Overrides the ChatAtomicFlow config
48
 
49
 
50
  init_human_message_prompt_template:
51
- _target_: flows.prompt_template.JinjaPrompt
52
  template: |2-
53
  Answer the following question: {{question}}
54
  input_variables: ["question"]
 
1
  input_interface: # Connector between the "input data" and the Flow
2
+ _target_: aiflows.interfaces.KeyInterface
3
  additional_transformations:
4
+ - _target_: aiflows.data_transformations.KeyMatchInput # Pass the input parameters specified by the flow
5
 
6
  output_interface: # Connector between the Flow's output and the caller
7
+ _target_: aiflows.interfaces.KeyInterface
8
  keys_to_rename:
9
  api_output: answer # Rename the api_output to answer
10
 
11
  flow: # Overrides the ChatAtomicFlow config
12
+ _target_: flow_modules.aiflows.ChatFlowModule.ChatAtomicFlow.instantiate_from_default_config
13
 
14
  name: "SimpleQA_Flow"
15
  description: "A flow that answers questions."
 
20
 
21
  # ~~~ backend model parameters ~~
22
  backend:
23
+ _target_: aiflows.backends.llm_lite.LiteLLMBackend
24
  api_infos: ???
25
  model_name:
26
  openai: "gpt-3.5-turbo"
 
40
 
41
  # ~~~ Prompt specification ~~~
42
  system_message_prompt_template:
43
+ _target_: aiflows.prompt_template.JinjaPrompt
44
  template: |2-
45
  You are a helpful chatbot that truthfully answers questions.
46
  input_variables: []
 
48
 
49
 
50
  init_human_message_prompt_template:
51
+ _target_: aiflows.prompt_template.JinjaPrompt
52
  template: |2-
53
  Answer the following question: {{question}}
54
  input_variables: ["question"]
run.py CHANGED
@@ -2,13 +2,13 @@ import os
2
 
3
  import hydra
4
 
5
- import flows
6
- from flows.flow_launchers import FlowLauncher
7
- from flows.backends.api_info import ApiInfo
8
- from flows.utils.general_helpers import read_yaml_file
9
 
10
- from flows import logging
11
- from flows.flow_cache import CACHING_PARAMETERS, clear_cache
12
 
13
  CACHING_PARAMETERS.do_caching = False # Set to True in order to disable caching
14
  # clear_cache() # Uncomment this line to clear the cache
@@ -18,7 +18,7 @@ logging.set_verbosity_debug()
18
  dependencies = [
19
  {"url": "aiflows/ChatFlowModule", "revision": os.getcwd()},
20
  ]
21
- from flows import flow_verse
22
  flow_verse.sync_dependencies(dependencies)
23
 
24
  if __name__ == "__main__":
 
2
 
3
  import hydra
4
 
5
+ import aiflows
6
+ from aiflows.flow_launchers import FlowLauncher
7
+ from aiflows.backends.api_info import ApiInfo
8
+ from aiflows.utils.general_helpers import read_yaml_file
9
 
10
+ from aiflows import logging
11
+ from aiflows.flow_cache import CACHING_PARAMETERS, clear_cache
12
 
13
  CACHING_PARAMETERS.do_caching = False # Set to True in order to disable caching
14
  # clear_cache() # Uncomment this line to clear the cache
 
18
  dependencies = [
19
  {"url": "aiflows/ChatFlowModule", "revision": os.getcwd()},
20
  ]
21
+ from aiflows import flow_verse
22
  flow_verse.sync_dependencies(dependencies)
23
 
24
  if __name__ == "__main__":