Neel-Shah-29 committed
Commit ebe1791 · verified · 1 Parent(s): 504aad1

Update run.py

Files changed (1): run.py +3 -0
run.py CHANGED
@@ -21,9 +21,11 @@ CACHING_PARAMETERS.do_caching = False # Set to True in order to disable caching
 
 logging.set_verbosity_debug()
 # Comment out if using huggingface backend
+# Setting litellm.set_verbose enables debug logging of the outputs, and litellm.drop_params automatically drops parameters that the backend does not support (the OpenAI and Hugging Face response formats differ, so this is needed to translate the Hugging Face response into the OpenAI format).
 # import litellm
 # litellm.set_verbose=True
 # litellm.drop_params=True
+
 dependencies = [
     {"url": "aiflows/ChatFlowModule", "revision": os.getcwd()},
 ]
@@ -48,6 +50,7 @@ if __name__ == "__main__":
     api_information = [ApiInfo(backend_used="openai",
                                api_key = os.getenv("OPENAI_API_KEY"))]
     #Huggingface backend
+    # Here api_base is the URL of your self-hosted server; this way, you can run any flow against your own server.
     # api_information = [ApiInfo(backend_used="huggingface",
     #                            api_key = os.getenv("HUGGINGFACE_API_KEY"), api_base="http://0.0.0.0:5000/v1/completions")]
     # # Azure backend
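
For context on the first added comment, here is a minimal sketch of what those litellm settings do when a Hugging Face backend is in play. This is illustrative, not part of the commit; the model id and prompt are placeholders, and only the api_base URL is taken from the script above.

import litellm

litellm.set_verbose = True   # debug: log full request/response payloads
litellm.drop_params = True   # silently drop kwargs the chosen backend does not support

# litellm returns the Hugging Face completion in the OpenAI response schema,
# so the caller reads a single format regardless of backend.
response = litellm.completion(
    model="huggingface/meta-llama/Llama-2-7b-chat-hf",  # placeholder model id
    messages=[{"role": "user", "content": "Hello!"}],
    api_base="http://0.0.0.0:5000/v1/completions",
)
print(response.choices[0].message.content)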
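And for the second added comment, a hedged sketch of switching run.py from the OpenAI backend to a self-hosted Hugging Face backend via api_base. The import path assumes the aiflows package layout; the host and port mirror the diff and should be replaced with your own server's address.

import os
from aiflows.backends.api_info import ApiInfo

# Default: hosted OpenAI backend, as in run.py.
api_information = [ApiInfo(backend_used="openai",
                           api_key=os.getenv("OPENAI_API_KEY"))]

# Self-hosted alternative: api_base points at your own completion server,
# so the flow runs against it instead of a hosted API.
api_information = [ApiInfo(backend_used="huggingface",
                           api_key=os.getenv("HUGGINGFACE_API_KEY"),
                           api_base="http://0.0.0.0:5000/v1/completions")]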