import os
import hydra
import aiflows
from aiflows.flow_launchers import FlowLauncher
from aiflows.backends.api_info import ApiInfo
from aiflows.utils.general_helpers import read_yaml_file, quick_load_api_keys
from aiflows import logging
from aiflows.flow_cache import CACHING_PARAMETERS, clear_cache
from aiflows.utils import serve_utils
from aiflows.workers import run_dispatch_worker_thread
from aiflows.messages import FlowMessage
from aiflows.interfaces import KeyInterface
CACHING_PARAMETERS.do_caching = False  # Set to True to enable caching
# clear_cache() # Uncomment this line to clear the cache
logging.set_verbosity_debug()
from aiflows import flow_verse
# ~~~ Load Flow dependencies from FlowVerse ~~~
dependencies = [
{"url": "aiflows/ControllerExecutorFlowModule", "revision": os.getcwd()},
]
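# Using a local path as the revision (here, the current working directory) syncs the
# module from the local copy rather than pulling it from the FlowVerse hub.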
flow_verse.sync_dependencies(dependencies)
if __name__ == "__main__":
    # ~~~ Set the API information ~~~
    # OpenAI backend
    api_information = [ApiInfo(backend_used="openai", api_key=os.getenv("OPENAI_API_KEY"))]
    # Azure backend
    # api_information = [ApiInfo(backend_used="azure",
    #                            api_base=os.getenv("AZURE_API_BASE"),
    #                            api_key=os.getenv("AZURE_OPENAI_KEY"),
    #                            api_version=os.getenv("AZURE_API_VERSION"))]
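    # `api_information` is a list, so several ApiInfo entries (e.g., multiple keys or
    # backends) can be supplied for the backend to switch between.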
    FLOW_MODULES_PATH = "./"

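    # CoLink credentials: COLINK_JWT is the user's JWT and LOCAL_COLINK_ADDRESS is the
    # address of a running CoLink server.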
    jwt = os.getenv("COLINK_JWT")
    addr = os.getenv("LOCAL_COLINK_ADDRESS")

    cl = serve_utils.start_colink_component(
        "ReAct Demo",
        {"jwt": jwt, "addr": addr}
    )

    # path_to_output_file = "output.jsonl"  # Uncomment this line to save the output to disk

    root_dir = "."
    cfg_path = os.path.join(root_dir, "demo.yaml")
    cfg = read_yaml_file(cfg_path)
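    # The config is loaded as a plain dict; demo.yaml is assumed to define the
    # ControllerExecutorFlow (ReAct) and its subflows for this demo.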

    # ~~~ Serve the flow ~~~
    serve_utils.recursive_serve_flow(
        cl=cl,
        flow_type="ReAct_served",
        default_config=cfg,
        default_state=None,
        default_dispatch_point="coflows_dispatch",
    )
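    # Serving registers "ReAct_served" (and, recursively, its subflows) with CoLink so
    # that dispatch workers can instantiate it; it does not create an instance itself.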

    # In case you haven't started a dispatch worker thread yet, uncomment:
    # run_dispatch_worker_thread(cl, dispatch_point="coflows_dispatch", flow_modules_base_path=FLOW_MODULES_PATH)

    # Put the API information in the config
    quick_load_api_keys(cfg, api_information, key="api_infos")

    # ~~~ Get the data ~~~
    # This can be a list of samples
    # data = {"id": 0, "goal": "Answer the following question: What is the population of Canada?"}  # Uses Wikipedia
    # data = {"id": 0, "goal": "Answer the following question: Who was the NBA champion in 2023?"}
    data = {
        "id": 0,
        "goal": "Answer the following question: What is the profession and date of birth of Michael Jordan?",
    }
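    # The goal is answered with a ReAct loop: the Controller (an LLM) plans the next
    # step and the Executor runs it (e.g., a Wikipedia search) until a final answer is reached.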

    # ~~~ Run inference ~~~
    proxy_flow = serve_utils.recursive_mount(
        cl=cl,
        client_id="local",
        flow_type="ReAct_served",
        config_overrides=cfg,
        initial_state=None,
        dispatch_point_override=None,
    )
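    # recursive_mount creates an instance of the served flow (and its subflows) and
    # returns a proxy flow that is used to exchange messages with it.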

    input_message = FlowMessage(
        data=data,
        src_flow="Coflows team",
        dst_flow=proxy_flow,
        is_input_msg=True,
    )

    future = proxy_flow.ask(input_message)

    # ~~~ Print the output ~~~
    print(future.get_data())
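
    # Example invocation (a sketch; assumes a running CoLink server and that this
    # script is saved as run.py):
    #   export OPENAI_API_KEY="sk-..."
    #   export COLINK_JWT="<your CoLink user JWT>"
    #   export LOCAL_COLINK_ADDRESS="<host:port of the CoLink server>"
    #   python run.py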