baconnier committed
Commit 420f262
1 Parent(s): f387781

Update variables.py

Files changed (1)
  variables.py +35 -45
variables.py CHANGED
@@ -5,75 +5,65 @@ import os
 templates_json = os.getenv('PROMPT_TEMPLATES', '{}')

 try:
     # Parse JSON data with error handling
     prompt_data = json.loads(templates_json)
 except json.JSONDecodeError:
     # Fallback to empty dict if JSON is invalid
     prompt_data = {}
-

 # Create explanations dictionary with safe access
 metaprompt_explanations = {
     key: data.get("description", "No description available")
     for key, data in prompt_data.items()
 } if prompt_data else {}

 # Generate markdown explanation
 explanation_markdown = "".join([
     f"- **{key}**: {value}\n"
     for key, value in metaprompt_explanations.items()
 ])

 # Define models list
 models = [
     "meta-llama/Meta-Llama-3-70B-Instruct",
     "meta-llama/Meta-Llama-3-8B-Instruct",
     "meta-llama/Llama-3.1-70B-Instruct",
     "meta-llama/Llama-3.1-8B-Instruct",
     "meta-llama/Llama-3.2-3B-Instruct",
     "meta-llama/Llama-3.2-1B-Instruct",
     "meta-llama/Llama-2-13b-chat-hf",
     "meta-llama/Llama-2-7b-chat-hf",
     "HuggingFaceH4/zephyr-7b-beta",
     "HuggingFaceH4/zephyr-7b-alpha",
     "Qwen/Qwen2.5-72B-Instruct",
     "Qwen/Qwen2.5-1.5B",
     "microsoft/Phi-3.5-mini-instruct"
 ]

-examples=[
-    ["Write a story on the end of prompt engineering replaced by an Ai specialized in refining prompts.", "done"],
-    ["Tell me about that guy who invented the light bulb", "physics"],
-    ["Explain the universe.", "star"],
-    ["What's the population of New York City and how tall is the Empire State Building and who was the first mayor?", "morphosis"],
-    ["List American presidents.", "verse"],
-    ["Explain why the experiment failed.", "morphosis"],
-    ["Is nuclear energy good?", "verse"],
-    ["How does a computer work?", "phor"],
-    ["How to make money fast?", "done"],
-    ["how can you prove IT0's lemma in stochastic calculus ?", "arpe"],
-]
+# Extract examples only from JSON templates
+examples = []
+for key, data in prompt_data.items():
+    template_examples = data.get("examples", [])
+    if template_examples:
+        examples.extend([
+            [example[0], key] if isinstance(example, list) else [example, key]
+            for example in template_examples
+        ])

 # Get API token with error handling
 api_token = os.getenv('HF_API_TOKEN')
 if not api_token:
     raise ValueError("HF_API_TOKEN not found in environment variables")

 # Create meta_prompts dictionary with safe access
 meta_prompts = {
     key: data.get("template", "No template available")
     for key, data in prompt_data.items()
 } if prompt_data else {}

 prompt_refiner_model = os.getenv('prompt_refiner_model', 'meta-llama/Llama-3.1-8B-Instruct')
-print("prompt_refiner_model used :"+prompt_refiner_model)
+print("prompt_refiner_model used :" + prompt_refiner_model)

-#prompt_refiner_model = os.getenv('prompt_refiner_model')
 echo_prompt_refiner = os.getenv('echo_prompt_refiner')
-
-
 openai_metaprompt = os.getenv('openai_metaprompt')
-
 advanced_meta_prompt = os.getenv('advanced_meta_prompt')
-
-
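
For context, the new extraction loop only produces output if each entry in the PROMPT_TEMPLATES JSON carries an optional "examples" list alongside the "description" and "template" keys that variables.py already reads. Below is a minimal sketch of a payload that would exercise both branches of the isinstance check; the template key "done" and the example texts are hypothetical placeholders, not values confirmed by this repo.

import json
import os

# Hypothetical PROMPT_TEMPLATES payload -- keys and texts are illustrative only.
os.environ['PROMPT_TEMPLATES'] = json.dumps({
    "done": {
        "description": "General-purpose refinement",
        "template": "Refine the following prompt: {prompt}",
        "examples": [
            "How to make money fast?",                    # bare string form
            ["Write a story about prompt engineering."],  # list form: first element is kept
        ],
    }
})

# Same logic as the committed loop in variables.py.
prompt_data = json.loads(os.environ['PROMPT_TEMPLATES'])
examples = []
for key, data in prompt_data.items():
    template_examples = data.get("examples", [])
    if template_examples:
        examples.extend([
            [example[0], key] if isinstance(example, list) else [example, key]
            for example in template_examples
        ])

print(examples)
# [['How to make money fast?', 'done'], ['Write a story about prompt engineering.', 'done']]

Both the bare-string and list forms normalize to [text, key] pairs, so the resulting examples list keeps the same shape as the hardcoded list this commit removes.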