File size: 2,775 Bytes
e870413
 
 
 
 
 
a48cd85
c2762f8
a48cd85
e870413
 
a48cd85
e870413
3d32c38
e870413
 
a48cd85
e870413
9138245
e870413
9138245
c20511b
bb3929c
 
e870413
bb3929c
e870413
 
9138245
e870413
 
 
a48cd85
e870413
 
a48cd85
e870413
 
 
 
566a1ad
e870413
 
 
 
 
 
3320e1a
 
 
 
e870413
 
 
 
a48cd85
 
 
 
e870413
 
 
 
a48cd85
8ae9883
566a1ad
e870413
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import gradio as gr
from gradio_huggingfacehub_search import HuggingfaceHubSearch
import requests

# In-memory de-duplication cache: maps hash(request parameters) -> HTTP status
# code of the last submission attempt. Lives only for the process lifetime;
# it is not persisted across restarts.
processed_inputs: dict[int, int] = {}

def process_inputs(markdown, model_id, q_method, email, oauth_token: gr.OAuthToken | None, profile: gr.OAuthProfile | None):
    """Validate the form inputs and submit a quantization task to the Nexa service.

    Args:
        markdown: Value of the informational Markdown component (unused).
        model_id: Hugging Face Hub model id selected in the search box.
        q_method: GGML quantization option chosen in the dropdown.
        email: Email address to notify once processing completes.
        oauth_token: OAuth token injected by Gradio; None when logged out.
        profile: OAuth profile injected by Gradio; None when logged out.

    Returns:
        A Markdown status string shown in the output component.
    """
    # Check `profile is None` BEFORE touching profile.username: the original
    # dereferenced `profile.username` even when profile was None, raising
    # AttributeError for logged-out users instead of showing this message.
    if oauth_token is None or oauth_token.token is None or profile is None or profile.username is None:
        return "##### You must be logged in to use this service."

    if not model_id or not q_method or not email:
        return "##### All fields are required!"

    # De-duplication key for repeated submissions.
    # NOTE(review): the OAuth token is part of the key, so the same request
    # after a re-login (fresh token) is NOT detected as a duplicate — confirm
    # this is intended before changing the key.
    input_hash = hash((model_id, q_method, oauth_token.token, profile.username))

    if input_hash in processed_inputs and processed_inputs[input_hash] == 200:
        return "##### This request has already been submitted successfully. Please do not submit the same request multiple times."

    url = "https://sdk.nexa4ai.com/task"

    data = {
        "repository_url": f"https://huggingface.co/{model_id}",
        "username": profile.username,
        "access_token": oauth_token.token,
        "email": email,
        "quantization_option": q_method,
    }

    # requests has NO default timeout: without one, an unreachable service
    # would hang this Gradio worker indefinitely. Network failures are turned
    # into the same user-visible error message instead of an unhandled crash.
    try:
        response = requests.post(url, json=data, timeout=30)
    except requests.RequestException as exc:
        return f"##### Failed to submit request: {exc}"

    if response.status_code == 200:
        # Remember the success so identical re-submissions are short-circuited.
        processed_inputs[input_hash] = 200
        return "##### Your request has been submitted successfully. We will notify you by email once processing is complete. There is no need to submit the same request multiple times."
    else:
        # A failed status is recorded but does not block retries (only 200 does).
        processed_inputs[input_hash] = response.status_code
        return f"##### Failed to submit request: {response.text}"

# Build the form widgets first, then assemble the Interface; the widget order
# here matches the parameter order of process_inputs.
_notice = gr.Markdown(value="##### 🔔   You must grant access to the model repository before use.")

_model_picker = HuggingfaceHubSearch(
    label="Hub Model ID",
    placeholder="Search for model id on Huggingface",
    search_type="model",
)

# Supported GGML quantization options, coarsest (q2) to full precision (f16).
_quant_options = [
    "q2_K", "q3_K", "q3_K_S", "q3_K_M", "q3_K_L",
    "q4_0", "q4_1", "q4_K", "q4_K_S", "q4_K_M",
    "q5_0", "q5_1", "q5_K", "q5_K_S", "q5_K_M",
    "q6_K", "q8_0", "f16",
]

_quant_dropdown = gr.Dropdown(
    _quant_options,
    label="Quantization Option",
    info="GGML quantisation options",
    value="q4_0",
    filterable=False,
)

_email_box = gr.Textbox(label="Email", placeholder="Enter your email here")

_status_output = gr.Markdown(
    label="output",
    value="##### Please enter the model URL, select a quantization method, and provide your email address.",
)

iface = gr.Interface(
    fn=process_inputs,
    inputs=[_notice, _model_picker, _quant_dropdown, _email_box],
    outputs=_status_output,
    title="Create your own GGUF Quants, blazingly fast ⚡!",
    allow_flagging="never",
)

theme = gr.themes.Base(text_size="lg")
with gr.Blocks(theme=theme) as demo:
    gr.Markdown(value="### πŸ””   You must be logged in to use this service.")
    gr.LoginButton(min_width=250)
    iface.render()

demo.launch(share=True)