Remove weight_type and base_model from the submission tab
- app.py +0 -9
- src/submission/submit.py +4 -14
app.py
CHANGED
@@ -465,13 +465,6 @@ with gr.Blocks() as demo_submission:
                 multiselect=False,
                 value="float16",
             )
-            weight_type = gr.Dropdown(
-                label="Weights type",
-                choices=[i.value.name for i in WeightType],
-                multiselect=False,
-                value="Original",
-            )
-            base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
             add_special_tokens = gr.Dropdown(
                 label="AddSpecialTokens",
                 choices=[i.value.name for i in AddSpecialTokens if i != AddSpecialTokens.Unknown],
@@ -485,10 +478,8 @@ with gr.Blocks() as demo_submission:
             add_new_eval,
             [
                 model_name_textbox,
-                base_model_name_textbox,
                 revision_name_textbox,
                 precision,
-                weight_type,
                 model_type,
                 add_special_tokens,
             ],
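
For orientation, here is a minimal, hypothetical sketch of how the remaining form fields are wired to the submit handler after this change. It assumes the usual leaderboard-template layout; the stubbed add_new_eval, the dropdown choices, and the names submit_button and submission_result are invented for illustration and do not appear in the diff above.

import gradio as gr

def add_new_eval(model, revision, precision, model_type, add_special_tokens):
    # Stub standing in for src.submission.submit.add_new_eval (second file in this commit).
    return f"Queued {model} ({revision}, {precision}, {model_type}, special_tokens={add_special_tokens})"

with gr.Blocks() as demo_submission:
    # Only these five fields remain as inputs to add_new_eval; the
    # weight_type dropdown and the base-model textbox were removed.
    model_name_textbox = gr.Textbox(label="Model name")
    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
    precision = gr.Dropdown(
        choices=["float16", "bfloat16"],  # placeholder; the app derives these from a Precision enum
        label="Precision",
        multiselect=False,
        value="float16",
    )
    model_type = gr.Dropdown(
        choices=["pretrained", "fine-tuned"],  # placeholder choices
        label="Model type",
        multiselect=False,
    )
    add_special_tokens = gr.Dropdown(
        choices=["True", "False"],  # placeholder; the app filters an AddSpecialTokens enum
        label="AddSpecialTokens",
        multiselect=False,
    )

    submit_button = gr.Button("Submit")  # name assumed
    submission_result = gr.Markdown()    # name assumed
    submit_button.click(
        add_new_eval,
        [
            model_name_textbox,
            revision_name_textbox,
            precision,
            model_type,
            add_special_tokens,
        ],
        submission_result,
    )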
src/submission/submit.py
CHANGED
@@ -16,10 +16,8 @@ USERS_TO_SUBMISSION_DATES = None
 
 def add_new_eval(
     model: str,
-    base_model: str,
     revision: str,
     precision: str,
-    weight_type: str,
     model_type: str,
     add_special_tokens: str,
 ):
@@ -45,15 +43,9 @@ def add_new_eval(
         revision = "main"
 
     # Is the model on the hub?
-    if weight_type in ["Delta", "Adapter"]:
-        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=False)
-        if not base_model_on_hub:
-            return styled_error(f'Base model "{base_model}" {error}')
-
-    if not weight_type == "Adapter":
-        model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
-        if not model_on_hub:
-            return styled_error(f'Model "{model}" {error}')
+    model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
+    if not model_on_hub:
+        return styled_error(f'Model "{model}" {error}')
 
     # Is the model info correctly filled?
     try:
@@ -78,10 +70,8 @@ def add_new_eval(
 
     eval_entry = {
         "model": model,
-        "base_model": base_model,
         "revision": revision,
         "precision": precision,
-        "weight_type": weight_type,
         "status": "PENDING",
         "submitted_time": current_time,
         "model_type": model_type,
@@ -99,7 +89,7 @@
     print("Creating eval file")
     OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
    os.makedirs(OUT_DIR, exist_ok=True)
-    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{
+    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{add_special_tokens}.json"
 
     with open(out_path, "w") as f:
         f.write(json.dumps(eval_entry))
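
To make the effect on the request queue concrete, here is a small self-contained sketch of what the trimmed-down add_new_eval now writes to disk. The values below (EVAL_REQUESTS_PATH, user_name, model_path, and the field values) are placeholders for what the real function derives from the submission; only the entry keys and the filename pattern mirror the new lines above.

import json
import os
from datetime import datetime, timezone

# Placeholder inputs standing in for values computed by the real function.
EVAL_REQUESTS_PATH = "./eval-queue"
user_name, model_path = "someuser", "somemodel"
model, revision, precision = "someuser/somemodel", "main", "float16"
model_type, add_special_tokens = "pretrained", "True"

current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

# The request entry no longer carries "base_model" or "weight_type".
eval_entry = {
    "model": model,
    "revision": revision,
    "precision": precision,
    "status": "PENDING",
    "submitted_time": current_time,
    "model_type": model_type,
}

# The filename is keyed by precision and add_special_tokens.
OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
os.makedirs(OUT_DIR, exist_ok=True)
out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{add_special_tokens}.json"

with open(out_path, "w") as f:
    f.write(json.dumps(eval_entry))

Running this drops a single PENDING request file such as ./eval-queue/someuser/somemodel_eval_request_False_float16_True.json.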