Upload llmdolphin.py

llmdolphin.py CHANGED (+16 -12)
@@ -76,6 +76,7 @@ llm_models = {
     "ChatWaifu_22B_v2.0_preview.Q4_K_S.gguf": ["mradermacher/ChatWaifu_22B_v2.0_preview-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_v1.4.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.4-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_v1.3.1.Q4_K_M.gguf": ["mradermacher/ChatWaifu_v1.3.1-GGUF", MessagesFormatterType.MISTRAL],
+    "dolphin-2.6-mistral-7b-dpo-laser.Q4_K_S.gguf": ["mradermacher/dolphin-2.6-mistral-7b-dpo-laser-GGUF", MessagesFormatterType.MISTRAL],
     "Flowable-Docs-Llama-3.1-8B.Q5_K_M.gguf": ["mradermacher/Flowable-Docs-Llama-3.1-8B-GGUF", MessagesFormatterType.LLAMA_3],
     "slimorca-gemma2-9b-fft.Q4_K_M.gguf": ["mradermacher/slimorca-gemma2-9b-fft-GGUF", MessagesFormatterType.ALPACA],
     "TQ2.5-14B-Sugarquill-v1-Q4_K_M.gguf": ["bartowski/TQ2.5-14B-Sugarquill-v1-GGUF", MessagesFormatterType.OPEN_CHAT],
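Each llm_models entry maps a GGUF filename to its Hugging Face repo and a MessagesFormatterType chat format. A minimal sketch of fetching the newly added model with huggingface_hub, assuming that is how the Space resolves these entries; the repo and filename come from the diff, while the local_dir name is an assumption:

from huggingface_hub import hf_hub_download

# Repo id and filename are taken from the new llm_models entry above;
# "models" is a hypothetical local cache directory, not from this commit.
path = hf_hub_download(
    repo_id="mradermacher/dolphin-2.6-mistral-7b-dpo-laser-GGUF",
    filename="dolphin-2.6-mistral-7b-dpo-laser.Q4_K_S.gguf",
    local_dir="models",
)
print(path)  # e.g. models/dolphin-2.6-mistral-7b-dpo-laser.Q4_K_S.gguf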
@@ -940,8 +941,17 @@ def list_uniq(l: list):
     return sorted(set(l), key=l.index)
 
 
+DEFAULT_STATE = {
+    "dolphin_sysprompt_mode": "Default",
+    "dolphin_output_language": llm_languages[0],
+}
+
+
 def get_state(state: dict, key: str):
     if key in state.keys(): return state[key]
+    elif key in DEFAULT_STATE.keys():
+        print(f"State '{key}' not found. Using default value.")
+        return DEFAULT_STATE[key]
     else:
         print(f"State '{key}' not found.")
         return None
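get_state now falls back to DEFAULT_STATE for known keys before giving up. A self-contained sketch of the resulting lookup behavior; llm_languages is stubbed with a placeholder list here, since its definition is outside this diff:

llm_languages = ["English"]  # placeholder; the real list is defined elsewhere in llmdolphin.py

DEFAULT_STATE = {
    "dolphin_sysprompt_mode": "Default",
    "dolphin_output_language": llm_languages[0],
}

def get_state(state: dict, key: str):
    if key in state.keys(): return state[key]
    elif key in DEFAULT_STATE.keys():
        print(f"State '{key}' not found. Using default value.")
        return DEFAULT_STATE[key]
    else:
        print(f"State '{key}' not found.")
        return None

state = {"dolphin_sysprompt_mode": "Custom"}
print(get_state(state, "dolphin_sysprompt_mode"))   # "Custom" (present in state)
print(get_state(state, "dolphin_output_language"))  # "English" (falls back to DEFAULT_STATE)
print(get_state(state, "override_llm_format"))      # None (no default defined)

Keys without a default, such as "override_llm_format", still return None, which is what the responder functions below rely on to pick the model's own chat format.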
@@ -1327,10 +1337,8 @@ def dolphin_respond(
     try:
         progress(0, desc="Processing...")
         override_llm_format = get_state(state, "override_llm_format")
-        if override_llm_format:
-            chat_template = override_llm_format
-        else:
-            chat_template = llm_models[model][1]
+        if override_llm_format: chat_template = override_llm_format
+        else: chat_template = llm_models[model][1]
 
         llm = Llama(
             model_path=str(Path(f"{llm_models_dir}/{model}")),
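The same two-line resolution now appears verbatim in dolphin_respond, dolphin_respond_auto, and respond_playground (see the next two hunks). A hypothetical helper that would factor it out, assuming the stored override is either falsy or a usable chat template; this refactor is not part of the commit:

def resolve_chat_template(state: dict, model: str):
    # Hypothetical refactor: prefer the user-set override from state,
    # otherwise fall back to the model's default MessagesFormatterType.
    override = get_state(state, "override_llm_format")
    return override if override else llm_models[model][1]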
@@ -1431,10 +1439,8 @@ def dolphin_respond_auto(
     progress(0, desc="Processing...")
 
     override_llm_format = get_state(state, "override_llm_format")
-    if override_llm_format:
-        chat_template = override_llm_format
-    else:
-        chat_template = llm_models[model][1]
+    if override_llm_format: chat_template = override_llm_format
+    else: chat_template = llm_models[model][1]
 
     llm = Llama(
         model_path=str(Path(f"{llm_models_dir}/{model}")),
@@ -1538,10 +1544,8 @@ def respond_playground(
 ):
     try:
         override_llm_format = get_state(state, "override_llm_format")
-        if override_llm_format:
-            chat_template = override_llm_format
-        else:
-            chat_template = llm_models[model][1]
+        if override_llm_format: chat_template = override_llm_format
+        else: chat_template = llm_models[model][1]
 
         llm = Llama(
             model_path=str(Path(f"{llm_models_dir}/{model}")),
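After resolving the template, each responder loads the model with llama-cpp-python's Llama class, as in the context lines above. A minimal loading sketch; the extra parameters (n_ctx, n_gpu_layers) are common Llama options assumed for illustration and do not appear in this diff, and llm_models_dir is stubbed since its definition is elsewhere in the file:

from pathlib import Path
from llama_cpp import Llama

llm_models_dir = "./models"  # assumption; defined elsewhere in llmdolphin.py
model = "dolphin-2.6-mistral-7b-dpo-laser.Q4_K_S.gguf"

llm = Llama(
    model_path=str(Path(f"{llm_models_dir}/{model}")),
    n_ctx=4096,       # context window; illustrative value
    n_gpu_layers=-1,  # offload all layers to GPU when available
)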