TobDeBer committed
Commit f68032d · 1 Parent(s): 9b589ae

add GPT2 and 3.1

Files changed (1): app.py (+16 -8)
app.py CHANGED
@@ -20,12 +20,18 @@ hf_hub_download(
     local_dir="./models"
 )
 
-#hf_hub_download(
-#    repo_id="TobDeBer/Meta-Llama-3.1-8B-Instruct-Q4_K_M-GGUF",
-#    filename="meta-llama-3.1-8b-instruct-q4_k_m.gguf",
-#    local_dir="./models",
-#    token=huggingface_token
-#)
+hf_hub_download(
+    repo_id="TobDeBer/gpt2-Q4_K_M-GGUF",
+    filename="gpt2-q4_k_m.gguf",
+    local_dir="./models"
+)
+
+hf_hub_download(
+    repo_id="TobDeBer/Meta-Llama-3.1-8B-Instruct-Q4_K_M-GGUF",
+    filename="meta-llama-3.1-8b-instruct-q4_k_m.gguf",
+    local_dir="./models",
+    token=huggingface_token
+)
 
 # TobDeBer/granite-8b-code-instruct-128k-Q4_K_M-GGUF
 # granite-8b-code-instruct-128k-q4_k_m.gguf
@@ -103,15 +109,17 @@ def respond(
         outputs += output
         yield outputs
 
-description = """<p align="center">Defaults Qwen 500M</p>
+description = """<p align="center">Defaults to Qwen 500M</p>
+More models in Advanced Section <br>
 """
 
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
         gr.Dropdown([
+                'qwen2-0_5b-instruct-q4_k_m.gguf',
+                'gpt2-q4_k_m.gguf',
                 'meta-llama-3.1-8b-instruct-q4_k_m.gguf',
-                'qwen2-0_5b-instruct-q4_k_m.gguf'
             ],
             value="qwen2-0_5b-instruct-q4_k_m.gguf",
             label="Model"