runningSnail commited on
Commit
1660171
1 Parent(s): de9cf9f
Files changed (3) hide show
  1. README.md +12 -9
  2. app.py +71 -0
  3. requirements.txt +2 -0
README.md CHANGED
@@ -1,13 +1,16 @@
1
  ---
2
- title: Gguf Convertor
3
- emoji: 🐠
4
- colorFrom: red
5
- colorTo: purple
6
  sdk: gradio
7
- sdk_version: 4.29.0
8
  app_file: app.py
9
  pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
1
  ---
2
+ title: Nexa AI GGUF Convertor
3
+ emoji: 😻
4
+ colorFrom: green
5
+ colorTo: gray
6
  sdk: gradio
7
+ sdk_version: 4.28.3
8
  app_file: app.py
9
  pinned: false
10
+ hf_oauth: true
11
+ hf_oauth_scopes:
12
+ - read-repos
13
+ - write-repos
14
+ - manage-repos
15
+ - inference-api
16
+ ---
app.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from gradio_huggingfacehub_search import HuggingfaceHubSearch
3
+ import requests
4
+
5
# In-memory dedup record: input_hash -> last HTTP status code returned by the
# backend. Lives only for the process lifetime, so it resets on Space restart.
processed_inputs = {}
6
+
7
def process_inputs(markdown, model_id, q_method, email, oauth_token: gr.OAuthToken | None, profile: gr.OAuthProfile | None):
    """Submit a GGUF quantization task for *model_id* to the Nexa conversion service.

    Parameters
    ----------
    markdown : value of the informational ``gr.Markdown`` input (unused).
    model_id : Hugging Face model id selected in the search box, e.g. ``"org/model"``.
    q_method : quantization option chosen in the dropdown, e.g. ``"q4_0"``.
    email    : address the backend notifies when processing completes.
    oauth_token, profile : injected by Gradio's HF OAuth integration; both may
        be ``None`` when the user is not logged in.

    Returns a markdown status string displayed in the output pane.
    """
    # `profile` itself can be None when the user is logged out — check it
    # before dereferencing `.username`, otherwise this raises AttributeError
    # instead of showing the friendly message (bug in the original guard).
    if oauth_token is None or oauth_token.token is None or profile is None or profile.username is None:
        return "##### You must be logged in to use this service."

    if not model_id or not q_method or not email:
        return "##### All fields are required!"

    # Best-effort in-memory dedup. NOTE(review): the OAuth token is part of the
    # key, so the same request re-submitted from a new login session is not
    # deduplicated — confirm this is intended.
    input_hash = hash((model_id, q_method, oauth_token.token, profile.username))
    if processed_inputs.get(input_hash) == 200:
        return "##### This request has already been submitted successfully. Please do not submit the same request multiple times."

    url = "https://sdk.nexa4ai.com/task"

    # SECURITY NOTE: this forwards the user's HF access token to a third-party
    # service. The Space requests broad OAuth scopes (read/write/manage repos),
    # so keep this endpoint trusted and the scope list as narrow as possible.
    data = {
        "repository_url": f"https://huggingface.co/{model_id}",
        "username": profile.username,
        "access_token": oauth_token.token,
        "email": email,
        "quantization_option": q_method,
    }

    try:
        # A timeout keeps the Gradio worker from hanging forever when the
        # backend is unreachable (the original call had no timeout), and
        # network failures are turned into a user-facing message instead of
        # an unhandled traceback.
        response = requests.post(url, json=data, timeout=30)
    except requests.RequestException as exc:
        return f"##### Failed to submit request: {exc}"

    # Record the outcome either way so repeated successes are suppressed above.
    processed_inputs[input_hash] = response.status_code
    if response.status_code == 200:
        return "##### Your request has been submitted successfully. We will notify you by email once processing is complete. There is no need to submit the same request multiple times."
    return f"##### Failed to submit request: {response.text}"
37
+
38
# Single-page form (model search + quantization dropdown + email field) wired
# to process_inputs above. Rendered inside the Blocks context below rather than
# launched directly, so the login button can sit above it.
iface = gr.Interface(
    fn=process_inputs,
    inputs=[
        # Informational banner; its value is passed through to process_inputs
        # as the (unused) `markdown` parameter.
        gr.Markdown(value="##### 🔔 You must grant access to the model repository before use."),
        HuggingfaceHubSearch(
            label="Hub Model ID",
            placeholder="Search for model id on Huggingface",
            search_type="model",
        ),
        gr.Dropdown(
            ["q2_K", "q3_K", "q3_K_S", "q3_K_M", "q3_K_L", "q4_0", "q4_1", "q4_K", "q4_K_S", "q4_K_M", "q5_0", "q5_1", "q5_K", "q5_K_S", "q5_K_M", "q6_K", "q8_0", "f16"],
            label="Quantization Option",
            info="GGML quantisation options",
            value="q4_0",
            filterable=False
        ),
        gr.Textbox(label="Email", placeholder="Enter your email here")
    ],
    # Status markdown returned by process_inputs replaces this placeholder.
    outputs = gr.Markdown(
        label="output",
        value="##### Please enter the model URL, select a quantization method, and provide your email address."
    ),
    title="Create your own GGUF Quants, blazingly fast ⚡!",
    allow_flagging="never"
)

# Wrap the interface in a Blocks layout so the HF OAuth login button (required
# by process_inputs' oauth_token/profile parameters) appears above the form.
theme = gr.themes.Base(text_size="lg")
with gr.Blocks(theme=theme) as demo:
    gr.Markdown(value="### 🔔 You must be logged in to use this service.")
    gr.LoginButton(min_width=250)
    iface.render()
    gr.Markdown(value="We sincerely thank our community members, [Perry](https://huggingface.co/PerryCheng614), [Brian](https://huggingface.co/JoyboyBrian), [Qi](https://huggingface.co/qiqiWav), for their extraordinary contributions to this GGUF converter project.")

# NOTE(review): share=True is unnecessary when hosted on a HF Space (the Space
# already serves the app publicly) — confirm whether it is needed here.
demo.launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ gradio
2
+ gradio_huggingfacehub_search