Add error notification and adjust model fetching logic
Browse files- src/components/ModelLoader.tsx +20 -3
- src/lib/huggingface.ts +5 -48
src/components/ModelLoader.tsx
CHANGED
@@ -1,10 +1,13 @@
|
|
1 |
-
import { useEffect, useCallback } from 'react'
|
2 |
-
import { ChevronDown, Loader } from 'lucide-react'
|
3 |
import { QuantizationType, WorkerMessage } from '../types'
|
4 |
import { useModel } from '../contexts/ModelContext'
|
5 |
import { getWorker } from '../lib/workerManager'
|
|
|
6 |
|
7 |
const ModelLoader = () => {
|
|
|
|
|
8 |
const {
|
9 |
modelInfo,
|
10 |
selectedQuantization,
|
@@ -75,7 +78,14 @@ const ModelLoader = () => {
|
|
75 |
}
|
76 |
} else if (status === 'error') {
|
77 |
setStatus('error')
|
78 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
79 |
}
|
80 |
}
|
81 |
|
@@ -164,6 +174,13 @@ const ModelLoader = () => {
|
|
164 |
</div>
|
165 |
)}
|
166 |
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
167 |
</div>
|
168 |
)
|
169 |
}
|
|
|
1 |
+
import { useEffect, useCallback, useState } from 'react'
|
2 |
+
import { ChevronDown, Loader, X } from 'lucide-react'
|
3 |
import { QuantizationType, WorkerMessage } from '../types'
|
4 |
import { useModel } from '../contexts/ModelContext'
|
5 |
import { getWorker } from '../lib/workerManager'
|
6 |
+
import { Alert, AlertDescription } from './ui/alert'
|
7 |
|
8 |
const ModelLoader = () => {
|
9 |
+
const [isError, setIsError] = useState(false)
|
10 |
+
const [errorMessage, setErrorMessage] = useState('')
|
11 |
const {
|
12 |
modelInfo,
|
13 |
selectedQuantization,
|
|
|
78 |
}
|
79 |
} else if (status === 'error') {
|
80 |
setStatus('error')
|
81 |
+
const error = e.data.output
|
82 |
+
console.error(error)
|
83 |
+
setErrorMessage(error.split('.')[0] + '. See console for details.')
|
84 |
+
setIsError(true)
|
85 |
+
setTimeout(() => {
|
86 |
+
setIsError(false)
|
87 |
+
setErrorMessage('')
|
88 |
+
}, 3000)
|
89 |
}
|
90 |
}
|
91 |
|
|
|
174 |
</div>
|
175 |
)}
|
176 |
</div>
|
177 |
+
{isError && (
|
178 |
+
<div className="fixed bottom-0 right-0 m-2">
|
179 |
+
<Alert variant="destructive">
|
180 |
+
<AlertDescription>{errorMessage}</AlertDescription>
|
181 |
+
</Alert>
|
182 |
+
</div>
|
183 |
+
)}
|
184 |
</div>
|
185 |
)
|
186 |
}
|
src/lib/huggingface.ts
CHANGED
@@ -9,21 +9,10 @@ const getModelInfo = async (
|
|
9 |
modelName: string,
|
10 |
pipeline: string
|
11 |
): Promise<ModelInfoResponse> => {
|
12 |
-
// const token = process.env.REACT_APP_HUGGINGFACE_TOKEN
|
13 |
-
|
14 |
-
// if (!token) {
|
15 |
-
// throw new Error(
|
16 |
-
// 'Hugging Face token not found. Please set REACT_APP_HUGGINGFACE_TOKEN in your .env file'
|
17 |
-
// )
|
18 |
-
// }
|
19 |
-
|
20 |
const response = await fetch(
|
21 |
`https://huggingface.co/api/models/${modelName}`,
|
22 |
{
|
23 |
method: 'GET'
|
24 |
-
// headers: {
|
25 |
-
// Authorization: `Bearer ${token}`
|
26 |
-
// }
|
27 |
}
|
28 |
)
|
29 |
|
@@ -107,9 +96,6 @@ const getModelInfo = async (
|
|
107 |
`https://huggingface.co/api/models/${baseModel}`,
|
108 |
{
|
109 |
method: 'GET'
|
110 |
-
// headers: {
|
111 |
-
// Authorization: `Bearer ${token}`
|
112 |
-
// }
|
113 |
}
|
114 |
)
|
115 |
|
@@ -144,27 +130,11 @@ const getModelInfo = async (
|
|
144 |
const getModelsByPipeline = async (
|
145 |
pipelineTag: string
|
146 |
): Promise<ModelInfoResponse[]> => {
|
147 |
-
// const token = process.env.REACT_APP_HUGGINGFACE_TOKEN
|
148 |
-
|
149 |
-
// if (!token) {
|
150 |
-
// throw new Error(
|
151 |
-
// 'Hugging Face token not found. Please set REACT_APP_HUGGINGFACE_TOKEN in your .env file'
|
152 |
-
// )
|
153 |
-
// }
|
154 |
-
|
155 |
-
// First search with filter=onnx
|
156 |
-
console.log(
|
157 |
-
pipelineTag === 'feature-extraction'
|
158 |
-
? '&search=sentence-transformers'
|
159 |
-
: '&filter=onnx'
|
160 |
-
)
|
161 |
const response1 = await fetch(
|
162 |
-
`https://huggingface.co/api/models?filter=${pipelineTag}${pipelineTag === 'feature-extraction' ? '&search=sentence-transformers' : '&filter=onnx'}&sort=downloads&limit=50`,
|
163 |
{
|
164 |
method: 'GET'
|
165 |
-
// headers: {
|
166 |
-
// Authorization: `Bearer ${token}`
|
167 |
-
// }
|
168 |
}
|
169 |
)
|
170 |
if (!response1.ok) {
|
@@ -174,17 +144,14 @@ const getModelsByPipeline = async (
|
|
174 |
}
|
175 |
const models1 = await response1.json()
|
176 |
|
177 |
-
// Second search with search=onnx
|
178 |
const response2 = await fetch(
|
179 |
-
`https://huggingface.co/api/models?filter=${pipelineTag}&search=onnx&sort=downloads&limit=50`,
|
180 |
{
|
181 |
method: 'GET'
|
182 |
-
// headers: {
|
183 |
-
// Authorization: `Bearer ${token}`
|
184 |
-
// }
|
185 |
}
|
186 |
)
|
187 |
-
if (!response2.ok) {
|
188 |
throw new Error(
|
189 |
`Failed to fetch models for pipeline: ${response2.statusText}`
|
190 |
)
|
@@ -218,20 +185,10 @@ const getModelsByPipelineCustom = async (
|
|
218 |
searchString: string,
|
219 |
pipelineTag: string
|
220 |
): Promise<ModelInfoResponse[]> => {
|
221 |
-
// const token = process.env.REACT_APP_HUGGINGFACE_TOKEN
|
222 |
-
|
223 |
-
// if (!token) {
|
224 |
-
// throw new Error(
|
225 |
-
// 'Hugging Face token not found. Please set REACT_APP_HUGGINGFACE_TOKEN in your .env file'
|
226 |
-
// )
|
227 |
-
// }
|
228 |
const response = await fetch(
|
229 |
`https://huggingface.co/api/models?filter=${pipelineTag}&search=${searchString}&sort=downloads&limit=50`,
|
230 |
{
|
231 |
method: 'GET'
|
232 |
-
// headers: {
|
233 |
-
// Authorization: `Bearer ${token}`
|
234 |
-
// }
|
235 |
}
|
236 |
)
|
237 |
|
|
|
9 |
modelName: string,
|
10 |
pipeline: string
|
11 |
): Promise<ModelInfoResponse> => {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
const response = await fetch(
|
13 |
`https://huggingface.co/api/models/${modelName}`,
|
14 |
{
|
15 |
method: 'GET'
|
|
|
|
|
|
|
16 |
}
|
17 |
)
|
18 |
|
|
|
96 |
`https://huggingface.co/api/models/${baseModel}`,
|
97 |
{
|
98 |
method: 'GET'
|
|
|
|
|
|
|
99 |
}
|
100 |
)
|
101 |
|
|
|
130 |
const getModelsByPipeline = async (
|
131 |
pipelineTag: string
|
132 |
): Promise<ModelInfoResponse[]> => {
|
133 |
+
// Second search with search=onnx
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
134 |
const response1 = await fetch(
|
135 |
+
`https://huggingface.co/api/models?filter=${pipelineTag}&search=onnx-community&sort=createdAt&limit=50`,
|
136 |
{
|
137 |
method: 'GET'
|
|
|
|
|
|
|
138 |
}
|
139 |
)
|
140 |
if (!response1.ok) {
|
|
|
144 |
}
|
145 |
const models1 = await response1.json()
|
146 |
|
147 |
+
// First search with filter=onnx
|
148 |
const response2 = await fetch(
|
149 |
+
`https://huggingface.co/api/models?filter=${pipelineTag}${pipelineTag === 'feature-extraction' ? '&library=sentence-transformers' : '&filter=onnx'}&sort=downloads&limit=50`,
|
150 |
{
|
151 |
method: 'GET'
|
|
|
|
|
|
|
152 |
}
|
153 |
)
|
154 |
+
if (!response1.ok) {
|
155 |
throw new Error(
|
156 |
`Failed to fetch models for pipeline: ${response2.statusText}`
|
157 |
)
|
|
|
185 |
searchString: string,
|
186 |
pipelineTag: string
|
187 |
): Promise<ModelInfoResponse[]> => {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
188 |
const response = await fetch(
|
189 |
`https://huggingface.co/api/models?filter=${pipelineTag}&search=${searchString}&sort=downloads&limit=50`,
|
190 |
{
|
191 |
method: 'GET'
|
|
|
|
|
|
|
192 |
}
|
193 |
)
|
194 |
|