reach-vb (HF staff) and radames committed
Commit 1fba392
Parent: ad3e892

quicksearch-models-component (#42)


- Update Dockerfile (0c0533de98e2b0b68e28ddf7e16d7c4c00949237)
- Update app.py (08a0c08294abeedc2eec64d38d6af2de8dc5df2f)


Co-authored-by: Radamés Ajna <radames@users.noreply.huggingface.co>

Files changed (2):
  1. Dockerfile +1 -1
  2. app.py +6 -4
Dockerfile CHANGED
@@ -37,7 +37,7 @@ RUN pyenv install ${PYTHON_VERSION} && \
     pyenv global ${PYTHON_VERSION} && \
     pyenv rehash && \
     pip install --no-cache-dir -U pip setuptools wheel && \
-    pip install "huggingface-hub" "hf-transfer" "gradio>=4.26.0"
+    pip install "huggingface-hub" "hf-transfer" "gradio>=4.26.0" "gradio_huggingfacehub_search==0.0.6"
 
 COPY --chown=1000 . ${HOME}/app
 RUN git clone https://github.com/ggerganov/llama.cpp
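The new dependency is pinned to gradio_huggingfacehub_search==0.0.6, so the Space builds reproducibly. A quick way to confirm the image picked up the pin (a hypothetical smoke test, not part of this commit) is to query the installed distribution metadata from inside the container:

# Hypothetical smoke test (not part of this commit): run inside the built
# image to confirm the pinned dependencies resolved as expected.
from importlib.metadata import version

print(version("gradio"))                        # should satisfy >= 4.26.0
print(version("gradio_huggingfacehub_search"))  # should be exactly 0.0.6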
app.py CHANGED
@@ -9,6 +9,8 @@ from huggingface_hub import snapshot_download
 from huggingface_hub import whoami
 from huggingface_hub import ModelCard
 
+from gradio_huggingfacehub_search import HuggingfaceHubSearch
+
 from textwrap import dedent
 
 LLAMA_LIKE_ARCHS = ["MistralForCausalLM", "LlamaForCausalLM"]
@@ -141,10 +143,10 @@ def process_model(model_id, q_method, hf_token, private_repo):
 iface = gr.Interface(
     fn=process_model,
     inputs=[
-        gr.Textbox(
-            lines=1,
+        HuggingfaceHubSearch(
             label="Hub Model ID",
-            info="Repo/model",
+            placeholder="Search for model id on Huggingface",
+            search_type="model",
         ),
         gr.Dropdown(
             ["Q2_K", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_K_S", "Q4_K_M", "Q5_0", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0"],
@@ -175,4 +177,4 @@ iface = gr.Interface(
 )
 
 # Launch the interface
-iface.queue(default_concurrency_limit=1, max_size=5).launch(debug=True)
+iface.queue(default_concurrency_limit=1, max_size=5).launch(debug=True)
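For context, here is a minimal, self-contained sketch of how the new HuggingfaceHubSearch component slots into a gr.Interface. The show_selection handler is a hypothetical stand-in for this repo's process_model; label, placeholder, and search_type are taken from the diff above.

# Minimal sketch of the HuggingfaceHubSearch component in isolation.
# show_selection is a hypothetical stand-in for this repo's process_model.
import gradio as gr
from gradio_huggingfacehub_search import HuggingfaceHubSearch

def show_selection(model_id: str) -> str:
    # Echo back whatever repo id the search box returned.
    return f"Selected model: {model_id}"

demo = gr.Interface(
    fn=show_selection,
    inputs=HuggingfaceHubSearch(
        label="Hub Model ID",
        placeholder="Search for model id on Huggingface",
        search_type="model",
    ),
    outputs=gr.Textbox(label="Result"),
)

if __name__ == "__main__":
    # Mirrors the app's launch pattern: a small queue keeps one job at a time.
    demo.queue(default_concurrency_limit=1, max_size=5).launch()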