Spaces:
Running
Running
Update helper_functions_api.py
Browse files- helper_functions_api.py +15 -19
helper_functions_api.py
CHANGED
@@ -65,6 +65,7 @@ from brave import Brave
|
|
65 |
from fuzzy_json import loads
|
66 |
from half_json.core import JSONFixer
|
67 |
from openai import OpenAI
|
|
|
68 |
|
69 |
llm_default_small = "llama3-8b-8192"
|
70 |
llm_default_medium = "llama3-70b-8192"
|
@@ -82,25 +83,20 @@ def limit_tokens(input_string, token_limit=8000):
|
|
82 |
"""
|
83 |
return encoding.decode(encoding.encode(input_string)[:token_limit])
|
84 |
|
85 |
-
def together_response(message, model=
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
model=model,
|
100 |
-
messages=messages,
|
101 |
-
temperature=temperature,
|
102 |
-
)
|
103 |
-
return response.choices[0].message.content
|
104 |
|
105 |
|
106 |
def json_from_text(text):
|
|
|
65 |
from fuzzy_json import loads
|
66 |
from half_json.core import JSONFixer
|
67 |
from openai import OpenAI
|
68 |
+
from together import Together
|
69 |
|
70 |
llm_default_small = "llama3-8b-8192"
|
71 |
llm_default_medium = "llama3-70b-8192"
|
|
|
83 |
"""
|
84 |
return encoding.decode(encoding.encode(input_string)[:token_limit])
|
85 |
|
86 |
+
def together_response(message, model = "meta-llama/Llama-3-8b-chat-hf", SysPrompt = SysPromptDefault, temperature=0.2):
    """Run one chat completion against the Together backend and return the reply text.

    The request goes through an OpenAI-compatible client whose base URL is the
    Helicone proxy for Together, so every call is logged/attributed via the
    ``Helicone-Auth`` header.

    Args:
        message: The user's prompt for this single-turn exchange.
        model: Together model identifier to query.
        SysPrompt: System prompt placed before the user message.
        temperature: Sampling temperature passed through to the API.

    Returns:
        The ``content`` string of the first completion choice.
    """
    # NOTE(review): a fresh client is built on every call — fine at low volume,
    # could be hoisted to module scope if this path becomes hot.
    api_client = OpenAI(
        api_key=TOGETHER_API_KEY,
        base_url="https://together.hconeai.com/v1",
        default_headers={"Helicone-Auth": f"Bearer {HELICON_API_KEY}"},
    )

    conversation = [
        {"role": "system", "content": SysPrompt},
        {"role": "user", "content": message},
    ]

    completion = api_client.chat.completions.create(
        model=model,
        messages=conversation,
        temperature=temperature,
    )
    return completion.choices[0].message.content
|
|
|
|
|
|
|
|
|
|
|
100 |
|
101 |
|
102 |
def json_from_text(text):
|