File size: 1,244 Bytes
4f4d036
11ac790
 
a749ad1
 
4f4d036
 
 
 
 
 
 
 
 
bdc9b47
297c90d
bdc9b47
3db9faf
 
bdc9b47
 
 
 
 
 
cd18bf6
bdc9b47
 
 
4f4d036
297c90d
bdc9b47
4f4d036
 
297c90d
6c54581
 
 
4f4d036
 
297c90d
4f4d036
 
 
 
 
 
 
297c90d
bdc9b47
4f4d036
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
# This is an abstract flow, therefore some required fields are not defined (and must be defined by the concrete flow)
_target_: flow_modules.aiflows.ChatFlowModule.ChatAtomicFlow.instantiate_from_default_config

name: ChatAtomicFlow
description: "Flow which uses as tool an LLM through an API"
enable_cache: true

# Retry policy for transient LLM API failures
n_api_retries: 6
wait_time_between_retries: 20

# Role labels used when assembling the chat message history
system_name: system
user_name: user
assistant_name: assistant

backend:
  _target_: aiflows.backends.llm_lite.LiteLLMBackend
  api_infos: ???  # mandatory value — must be supplied by the concrete flow / user config
  model_name:
    openai: "gpt-3.5-turbo"
  n: 1
  max_tokens: 2000
  temperature: 0.3
  top_p: 0.2
  stream: true

# Abstract: the concrete flow must define the template (and its input_variables)
system_message_prompt_template:
  _target_: aiflows.prompt_template.JinjaPrompt

init_human_message_prompt_template:
  _target_: aiflows.prompt_template.JinjaPrompt
  template: "{{query}}"
  input_variables:
    - "query"

human_message_prompt_template:
  _target_: aiflows.prompt_template.JinjaPrompt
  template: "{{query}}"
  input_variables:
    - "query"

input_interface_initialized:
  - "query"

# Abstract: the concrete flow must define the template (and its input_variables)
query_message_prompt_template:
  _target_: aiflows.prompt_template.JinjaPrompt

# How much prior history to include in each call; null means no truncation
previous_messages:
  first_k: null  # Note that the first message is the system prompt
  last_k: null

output_interface:
  - "api_output"