jbochi committed
Commit 8cfa7d5
1 Parent(s): a3d7db0

Add new smaller models

Files changed (1)
utils.js +74 -22
utils.js CHANGED
@@ -94,14 +94,72 @@ const TASKS = {
 };
 
 export const MODELS = {
-  coedit_large_quantized_4k: {
-    size: "441 MB",
+  coedit_small_quantized_4_0: {
+    size: "43.4 MB",
     base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/",
-    model: "model-q4k.gguf",
+    model: "model-small-q4_0.gguf",
+    tokenizer: "tokenizer.json",
+    config: "config-small.json",
+    tasks: TASKS,
+  },
+  coedit_small_quantized_4k: {
+    size: "59.6 MB",
+    base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/",
+    model: "model-small-q4k.gguf",
+    tokenizer: "tokenizer.json",
+    config: "config-small.json",
+    tasks: TASKS,
+  },
+  coedit_small_quantized_6k: {
+    size: "78.2 MB",
+    base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/",
+    model: "model-small.gguf",
+    tokenizer: "tokenizer.json",
+    config: "config-small.json",
+    tasks: TASKS,
+  },
+  coedit_small_fp32: {
+    size: "308 MB",
+    base_url: "https://huggingface.co/jbochi/coedit-base/resolve/main/",
+    model: "model.safetensors",
     tokenizer: "tokenizer.json",
     config: "config.json",
     tasks: TASKS,
   },
+
+  coedit_base_quantized_4_0: {
+    size: "139 MB",
+    base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/",
+    model: "model-base-q4_0.gguf",
+    tokenizer: "tokenizer.json",
+    config: "config-base.json",
+    tasks: TASKS,
+  },
+  coedit_base_quantized_4k: {
+    size: "139 MB",
+    base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/",
+    model: "model-base-q4k.gguf",
+    tokenizer: "tokenizer.json",
+    config: "config-base.json",
+    tasks: TASKS,
+  },
+  coedit_base_quantized_6k: {
+    size: "203 MB",
+    base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/",
+    model: "model-base.gguf",
+    tokenizer: "tokenizer.json",
+    config: "config-base.json",
+    tasks: TASKS,
+  },
+  coedit_base_fp32: {
+    size: "990 MB",
+    base_url: "https://huggingface.co/jbochi/coedit-base/resolve/main/",
+    model: "model.safetensors",
+    tokenizer: "tokenizer.json",
+    config: "config.json",
+    tasks: TASKS,
+  },
+
   coedit_large_quantized_4_0: {
     size: "441 MB",
     base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/",
@@ -110,22 +168,23 @@ export const MODELS = {
     config: "config.json",
     tasks: TASKS,
   },
-  coedit_large_quantized_6k: {
-    size: "643 MB",
+  coedit_large_quantized_4k: {
+    size: "441 MB",
     base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/",
-    model: "model.gguf",
+    model: "model-q4k.gguf",
     tokenizer: "tokenizer.json",
     config: "config.json",
     tasks: TASKS,
-  },
-  coedit_xl_quantized_4k: {
-    size: "1.6 GB",
+  },
+  coedit_large_quantized_6k: {
+    size: "643 MB",
     base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/",
-    model: "model-xl-q4k.gguf",
+    model: "model.gguf",
     tokenizer: "tokenizer.json",
-    config: "config-xl.json",
+    config: "config.json",
     tasks: TASKS,
   },
+
   coedit_xl_quantized_4_0: {
     size: "1.6 GB",
     base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/",
@@ -134,22 +193,15 @@ export const MODELS = {
     config: "config.json",
     tasks: TASKS,
   },
-  coedit_xl_quantized_6k: {
-    size: "2.34 GB",
+  coedit_xl_quantized_4k: {
+    size: "1.6 GB",
     base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/",
-    model: "model-xl.gguf",
+    model: "model-xl-q4k.gguf",
     tokenizer: "tokenizer.json",
     config: "config-xl.json",
     tasks: TASKS,
   },
-  coedit_large: {
-    size: "3.13 GB",
-    base_url: "https://huggingface.co/grammarly/coedit-large/resolve/main/",
-    model: "model.safetensors",
-    tokenizer: "tokenizer.json",
-    config: "config.json",
-    tasks: TASKS,
-  },
+
 };
 
 export function getModelInfo(id, taskID) {
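
Each MODELS entry pairs a base_url with relative paths for the weights, tokenizer, and config, so the new small/base variants are consumed the same way as the existing large/xl ones. The body of getModelInfo is not shown in this diff; the sketch below is only an illustration of how an entry such as coedit_small_quantized_4_0 might be resolved into absolute download URLs. The resolveModel name and the returned field names are assumptions, not the repository's actual implementation.

// Hypothetical sketch (not the file's actual getModelInfo body):
// resolve a MODELS entry into the absolute URLs the app would fetch.
function resolveModel(id, taskID) {
  const model = MODELS[id];                          // e.g. MODELS.coedit_small_quantized_4_0
  return {
    modelURL: model.base_url + model.model,          // .../model-small-q4_0.gguf (43.4 MB)
    tokenizerURL: model.base_url + model.tokenizer,  // .../tokenizer.json
    configURL: model.base_url + model.config,        // .../config-small.json
    task: model.tasks[taskID],                       // task definition from the shared TASKS map
  };
}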