Gregor Betz committed
Commit b1470b2
1 Parent(s): f9488c7
Files changed (2)
  1. backend/config.py +4 -0
  2. config.yaml +2 -2
backend/config.py CHANGED
@@ -1,3 +1,4 @@
+import logging
 import os
 
 
@@ -53,5 +54,8 @@ def process_config(config):
     else:
         raise ValueError("config.yaml is missing classifier_llm settings.")
 
+    logging.info(f"client_kwargs: {client_kwargs}")
+    logging.info(f"guide_kwargs: {guide_kwargs}")
+
     return client_kwargs, guide_kwargs
 
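Note that the logging.info calls added here only produce output if the root logger is configured at INFO level or lower; Python's logging module emits only WARNING and above by default. A minimal sketch of the setup that would need to exist elsewhere in the app (the basicConfig call is an assumption and not part of this commit):

import logging

# Assumed app-level setup: without it, the INFO messages added in
# process_config are suppressed (the root logger defaults to WARNING).
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)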
config.yaml CHANGED
@@ -1,10 +1,10 @@
 client_llm:
-  url: "" # <-- start your own inference endpoint here and provide url (or use https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta)
+  url: "" # <-- start your own inference endpoint and provide url here (or use https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta)
   model_id: "HuggingFaceH4/zephyr-7b-beta" # <-- your client llm
   max_tokens: 800
   temperature: 0.6
 expert_llm:
-  url: "" # <-- start your own inference endpoint here and provide url (or use https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3.1-70B-Instruct)
+  url: "" # <-- start your own inference endpoint and provide url here (or use https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3.1-70B-Instruct)
   model_id: "meta-llama/Meta-Llama-3.1-70B-Instruct"
 classifier_llm:
   model_id: "MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli"