Spaces:
Running
Running
Kang Suhyun
committed on
Commit
•
2667b32
1
Parent(s):
5cbd838
[#115|#116] Reintroduce EEVE (#121)
Browse files
* [#115|#116] Reintroduce EEVE
* _get_completion_kwargs
* remove print
* fix
* inline
model.py
CHANGED
@@ -3,6 +3,7 @@ This module contains functions to interact with the models.
|
|
3 |
"""
|
4 |
|
5 |
import json
|
|
|
6 |
from typing import List
|
7 |
|
8 |
import litellm
|
@@ -48,15 +49,15 @@ Output following this JSON format:
|
|
48 |
"role": "user",
|
49 |
"content": prompt
|
50 |
}]
|
|
|
51 |
try:
|
52 |
-
response = litellm.completion(
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
response_format={"type": "json_object"})
|
60 |
|
61 |
json_response = response.choices[0].message.content
|
62 |
parsed_json = json.loads(json_response)
|
@@ -67,6 +68,14 @@ Output following this JSON format:
|
|
67 |
except json.JSONDecodeError as e:
|
68 |
raise RuntimeError(f"Failed to get JSON response: {e}") from e
|
69 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
70 |
|
71 |
class AnthropicModel(Model):
|
72 |
|
@@ -109,6 +118,25 @@ Text:
|
|
109 |
return result.removesuffix(suffix).strip()
|
110 |
|
111 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
112 |
supported_models: List[Model] = [
|
113 |
Model("gpt-4o-2024-05-13"),
|
114 |
Model("gpt-4-turbo-2024-04-09"),
|
@@ -121,6 +149,10 @@ supported_models: List[Model] = [
|
|
121 |
Model("mistral-large-2402", provider="mistral"),
|
122 |
Model("llama3-8b-8192", provider="groq"),
|
123 |
Model("llama3-70b-8192", provider="groq"),
|
|
|
|
|
|
|
|
|
124 |
]
|
125 |
|
126 |
|
|
|
3 |
"""
|
4 |
|
5 |
import json
|
6 |
+
import os
|
7 |
from typing import List
|
8 |
|
9 |
import litellm
|
|
|
49 |
"role": "user",
|
50 |
"content": prompt
|
51 |
}]
|
52 |
+
|
53 |
try:
|
54 |
+
response = litellm.completion(model=self.provider + "/" +
|
55 |
+
self.name if self.provider else self.name,
|
56 |
+
api_key=self.api_key,
|
57 |
+
api_base=self.api_base,
|
58 |
+
messages=messages,
|
59 |
+
max_tokens=max_tokens,
|
60 |
+
**self._get_completion_kwargs())
|
|
|
61 |
|
62 |
json_response = response.choices[0].message.content
|
63 |
parsed_json = json.loads(json_response)
|
|
|
68 |
except json.JSONDecodeError as e:
|
69 |
raise RuntimeError(f"Failed to get JSON response: {e}") from e
|
70 |
|
71 |
+
def _get_completion_kwargs(self):
|
72 |
+
return {
|
73 |
+
# Ref: https://litellm.vercel.app/docs/completion/input#optional-fields # pylint: disable=line-too-long
|
74 |
+
"response_format": {
|
75 |
+
"type": "json_object"
|
76 |
+
}
|
77 |
+
}
|
78 |
+
|
79 |
|
80 |
class AnthropicModel(Model):
|
81 |
|
|
|
118 |
return result.removesuffix(suffix).strip()
|
119 |
|
120 |
|
121 |
+
class EeveModel(Model):
|
122 |
+
|
123 |
+
def _get_completion_kwargs(self):
|
124 |
+
json_template = {
|
125 |
+
"type": "object",
|
126 |
+
"properties": {
|
127 |
+
"result": {
|
128 |
+
"type": "string"
|
129 |
+
}
|
130 |
+
}
|
131 |
+
}
|
132 |
+
return {
|
133 |
+
"extra_body": {
|
134 |
+
"guided_json": json.dumps(json_template),
|
135 |
+
"guided_decoding_backend": "lm-format-enforcer"
|
136 |
+
}
|
137 |
+
}
|
138 |
+
|
139 |
+
|
140 |
supported_models: List[Model] = [
|
141 |
Model("gpt-4o-2024-05-13"),
|
142 |
Model("gpt-4-turbo-2024-04-09"),
|
|
|
149 |
Model("mistral-large-2402", provider="mistral"),
|
150 |
Model("llama3-8b-8192", provider="groq"),
|
151 |
Model("llama3-70b-8192", provider="groq"),
|
152 |
+
EeveModel("yanolja/EEVE-Korean-Instruct-10.8B-v1.0",
|
153 |
+
provider="openai",
|
154 |
+
api_base=os.getenv("EEVE_API_BASE"),
|
155 |
+
api_key=os.getenv("EEVE_API_KEY")),
|
156 |
]
|
157 |
|
158 |
|