Dagfinn1962 committed
Commit 7361517 • Parent(s): 2614d13
Update src/llm_boilers.py

src/llm_boilers.py  CHANGED  (+6 -30)
@@ -14,8 +14,6 @@ class llm_boiler:
     def __init__(self, model_id, openai_key):
         self.model_id = model_id
         self.openai_key = openai_key
-        self.load_fn = None  # Add the load_fn attribute
-        self.run_fn = None  # Add the run_fn attribute
         for f_idx, run_function in enumerate(MODEL_FUNCTIONS):
             if run_function.__name__.lower() in self.model_id:
                 print(
@@ -28,8 +26,6 @@ class llm_boiler:
                     f"Run function recognized for {self.model_id}: {run_function.__name__.lower()}"
                 )
                 self.run_fn = run_function
-        if self.load_fn is None or self.run_fn is None:
-            raise ValueError("Invalid model_id")
         self.model = self.load_fn(self.model_id, self.openai_key)
         self.name = self.run_fn.__name__.lower()
 
@@ -45,7 +41,6 @@ class llm_boiler:
         )
 
 
-
 LOAD_MODEL_FUNCTIONS = []
 MODEL_FUNCTIONS = []
 
@@ -56,17 +51,7 @@ def gpt_loader(model_id: str, openai_key: str):
     openai.api_key = openai_key  # os.getenv("OPENAI_API_KEY")
     logging.warning(f"model id: {model_id}")
 
-
-        model=model_id,
-        messages=[],
-        temperature=0.0,
-        max_tokens=0,
-        n=1,
-        stop=None,
-        log_level="info",
-    )
-
-    return model
+    return model_id
 
 
 LOAD_MODEL_FUNCTIONS.append(gpt_loader)
@@ -89,7 +74,7 @@ def gpt(
         temperature (float, optional): The value used to modulate the next token probabilities.
            Defaults to 1.0
     """
-    conversation = prompt.split("
+    conversation = prompt.split("<|im_start|>")
 
     messages = []
     for turn in conversation:
@@ -99,14 +84,14 @@ def gpt(
             messages.append(
                 {
                     "role": "system",
-                    "content": turn.replace("system\n", "").replace("
+                    "content": turn.replace("system\n", "").replace("<|im_end|>\n", ""),
                 }
             )
         elif first_word == "user":
             messages.append(
                 {
                     "role": "user",
-                    "content": turn.replace("user\n", "").replace("
+                    "content": turn.replace("user\n", "").replace("<|im_end|>\n", ""),
                 }
             )
         elif first_word == "assistant":
@@ -114,7 +99,7 @@ def gpt(
                 {
                     "role": "assistant",
                     "content": turn.replace("assistant\n", "").replace(
-                        "
+                        "<|im_end|>\n", ""
                     ),
                 }
             )
@@ -130,13 +115,4 @@ def gpt(
     return chat_completion
 
 
-
-model_id = "dfurman/chat-gpt-3.5-turbo"
-openai_key = os.getenv("API_KEY")
-
-model = llm_boiler(model_id, openai_key)
-
-prompt = "Hello, how are you?"
-temperature = 0.8
-
-response = model.run(prompt, temperature)
+MODEL_FUNCTIONS.append(gpt)
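
Note on the change to gpt: after this commit the run function expects a ChatML-style prompt and builds OpenAI chat messages by splitting on <|im_start|> and stripping the role prefix and the <|im_end|> marker from each turn, as the hunks above show. A minimal standalone sketch of that conversion step; the helper name prompt_to_messages and the first_word derivation are illustrative assumptions, not the module's exact code:

# Sketch of the prompt-to-messages conversion shown in the diff above.
# prompt_to_messages and the first_word derivation are assumptions for illustration.
def prompt_to_messages(prompt: str) -> list[dict]:
    messages = []
    for turn in prompt.split("<|im_start|>"):
        first_word = turn.split("\n")[0]  # role tag preceding the turn's content
        if first_word in ("system", "user", "assistant"):
            content = turn.replace(f"{first_word}\n", "").replace("<|im_end|>\n", "")
            messages.append({"role": first_word, "content": content})
    return messages

example_prompt = (
    "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
    "<|im_start|>user\nHello, how are you?<|im_end|>\n"
)
print(prompt_to_messages(example_prompt))
# [{'role': 'system', 'content': 'You are a helpful assistant.'},
#  {'role': 'user', 'content': 'Hello, how are you?'}]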
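
The lines removed at the end of the file appear to be a module-level example invocation, which would have executed on every import of the module. For reference, a sketch of the equivalent call-site usage built only from those removed lines; the import path is an assumption based on the file's location and is not something this commit defines:

import os

# Import path assumed from the file location src/llm_boilers.py; adjust to the app's layout.
from src.llm_boilers import llm_boiler

# Values and call shape taken from the example lines removed in this commit.
model_id = "dfurman/chat-gpt-3.5-turbo"
openai_key = os.getenv("API_KEY")  # assumes the key is supplied via the API_KEY env var

model = llm_boiler(model_id, openai_key)

prompt = "Hello, how are you?"
temperature = 0.8

response = model.run(prompt, temperature)
print(response)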