pvanand committed
Commit: c65f2f6
Parent: 5aac296

Update helper_functions_api.py

Files changed (1): helper_functions_api.py +33 -10
helper_functions_api.py CHANGED
@@ -82,22 +82,45 @@ def limit_tokens(input_string, token_limit=7500):
     """
     return encoding.decode(encoding.encode(input_string)[:token_limit])
 
-def together_response(message, model = "meta-llama/Llama-3-8b-chat-hf", SysPrompt = SysPromptDefault, temperature=0.2, frequency_penalty =0.1, max_tokens= 2000):
-    client = OpenAI(
+together_client = OpenAI(
     api_key=TOGETHER_API_KEY,
     base_url="https://together.hconeai.com/v1",
     default_headers={ "Helicone-Auth": f"Bearer {HELICON_API_KEY}"})
+
+groq_client = OpenAI(
+    api_key=GROQ_API_KEY,
+    base_url="https://groq.hconeai.com/openai/v1",
+    default_headers={ "Helicone-Auth": f"Bearer {HELICON_API_KEY}"})
+
+# Groq model names
+llm_default_small = "llama3-8b-8192"
+llm_default_medium = "llama3-70b-8192"
+
+# Together Model names (fallback)
+llm_fallback_small = "meta-llama/Llama-3-8b-chat-hf"
+llm_fallback_medium = "meta-llama/Llama-3-70b-chat-hf"
+
+### ------END OF LLM CONFIG-------- ###
+
+def together_response(message, model = llm_default_small, SysPrompt = SysPromptDefault, temperature=0.2, frequency_penalty =0.1, max_tokens= 2000):
 
     messages=[{"role": "system", "content": SysPrompt},{"role": "user", "content": message}]
 
-    response = client.chat.completions.create(
-        model=model,
-        messages=messages,
-        temperature=temperature,
-        frequency_penalty = frequency_penalty
-    )
-    return response.choices[0].message.content
-
+    params = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "frequency_penalty": frequency_penalty,
+        "max_tokens": max_tokens
+    }
+    try:
+        response = groq_client.chat.completions.create(**params)
+        return response.choices[0].message.content
+    except Exception as e:
+        print(f"Error calling GROQ API: {e}")
+        params["model"] = llm_fallback_small if model == llm_default_small else llm_fallback_medium
+        response = together_client.chat.completions.create(**params)
+        return response.choices[0].message.content
 
 def json_from_text(text):
     """