File size: 2,515 Bytes
134a499
9a3f7b4
 
988dbd8
9a3f7b4
 
 
ab5f5f1
 
9a3f7b4
a1135a9
9a3f7b4
5392172
9a3f7b4
 
 
5392172
a1135a9
 
9a3f7b4
3d7033f
a894537
 
 
 
 
0321f62
a894537
 
d262fb3
 
 
 
 
 
ab5f5f1
134a499
 
a894537
134a499
ab5f5f1
 
 
 
 
 
 
 
 
a1135a9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ab5f5f1
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
# Maps a raw model architecture identifier (e.g. HF `config.model_type`)
# to the emoji-decorated display label shown in the UI.
# Identifiers not listed here are displayed verbatim (see process_arch).
LLM_MODEL_ARCHS = {
    "stablelm_epoch": "πŸ”΄ StableLM-Epoch",
    "stablelm_alpha": "πŸ”΄ StableLM-Alpha",
    "mixformer-sequential": "πŸ§‘β€πŸ’» Phi Ο†",
    "RefinedWebModel": "πŸ¦… Falcon",
    "gpt_bigcode": "⭐ StarCoder",
    "RefinedWeb": "πŸ¦… Falcon",
    "baichuan": "🌊 Baichuan 百川",  # 百川 means "hundred rivers"
    "internlm": "πŸ§‘β€πŸŽ“ InternLM δΉ¦η”Ÿ",  # δΉ¦η”Ÿ means "scholar"
    "mistral": "Ⓜ️ Mistral",
    "mixtral": "Ⓜ️ Mixtral",
    "codegen": "♾️ CodeGen",
    "chatglm": "πŸ’¬ ChatGLM",
    "falcon": "πŸ¦… Falcon",
    "bloom": "🌸 Bloom",
    "llama": "πŸ¦™ LLaMA",
    "rwkv": "πŸ¦β€β¬› RWKV",
    "deci": "πŸ”΅ deci",
    "Yi": "πŸ«‚ Yi δΊΊ", # δΊΊ means "people"
    "mpt": "🧱 MPT",
    # architectures below have no emoji yet — suggestions welcome
    "gpt_neox": "GPT-NeoX",
    "gpt_neo": "GPT-Neo",
    "gpt2": "GPT-2",
    "gptj": "GPT-J",
    "xglm": "XGLM",
    "bart": "BART",
    "opt": "OPT",
}


def model_hyperlink(link, model_name):
    """Return an HTML anchor that opens *link* in a new tab, labelled *model_name*.

    The dotted-underline styling matches the app's link theme variables.
    """
    anchor_style = "color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;"
    return f'<a target="_blank" href="{link}" style="{anchor_style}">{model_name}</a>'


def process_arch(model_arch):
    """Map a raw architecture identifier to its display label.

    Falls back to the raw identifier when no pretty name is registered
    in ``LLM_MODEL_ARCHS``.
    """
    # dict.get does the membership test and lookup in one step.
    return LLM_MODEL_ARCHS.get(model_arch, model_arch)


def process_score(score, quantization):
    """Format *score* with two decimals.

    Quantized runs (anything other than the literal string "None") are
    marked with a trailing '*'; unquantized runs get a trailing space so
    the column widths stay aligned.
    """
    if quantization == "None":
        return f"{score:.2f} "
    return f"{score:.2f}*"


def process_quantization_scheme(x):
    """Derive a human-readable quantization label from a benchmark record.

    *x* is a mapping (e.g. a dataframe row) keyed by flattened backend
    config names. Returns "None" when no known scheme/flag combination
    matches.
    """
    scheme = x["backend.quantization_scheme"]
    if scheme == "bnb":
        # NOTE(review): `== True` (not truthiness) is kept as-is —
        # presumably so NaN / non-bool values in pandas rows count as
        # "not enabled"; confirm before changing.
        if x["backend.quantization_config.load_in_4bit"] == True:
            return "BnB.4bit"
        if x["backend.quantization_config.load_in_8bit"] == True:
            return "BnB.8bit"
    elif scheme == "gptq":
        exllama_version = x["backend.quantization_config.exllama_config.version"]
        if exllama_version == 1:
            return "GPTQ.4bit+ExllamaV1"
        if exllama_version == 2:
            return "GPTQ.4bit+ExllamaV2"
        if x["backend.quantization_config.bits"] == 4:
            return "GPTQ.4bit"
    return "None"


# def change_tab(query_param):
#     query_param = query_param.replace("'", '"')
#     query_param = json.loads(query_param)

#     if isinstance(query_param, dict) and "tab" in query_param and query_param["tab"] == "plot":
#         return gr.Tabs.update(selected=1)
#     else:
#         return gr.Tabs.update(selected=0)