Bad request: AutoPipeline can't find a pipeline linked to StableCascadeDecoderPipeline for None

#18
by miniwater - opened

Traceback:
File "/home/user/.local/lib/python3.10/site-packages/streamlit/runtime/scriptrunner/script_runner.py", line 535, in _run_script
exec(code, module.__dict__)
File "/home/user/app/app.py", line 3, in <module>
gr.load("models/stabilityai/stable-cascade").launch()
File "/home/user/.local/lib/python3.10/site-packages/gradio/external.py", line 60, in load
return load_blocks_from_repo(
File "/home/user/.local/lib/python3.10/site-packages/gradio/external.py", line 99, in load_blocks_from_repo
blocks: gradio.Blocks = factory_methods[src](name, hf_token, alias, **kwargs)
File "/home/user/.local/lib/python3.10/site-packages/gradio/external.py", line 368, in from_model
interface = gradio.Interface(**kwargs)
File "/home/user/.local/lib/python3.10/site-packages/gradio/interface.py", line 497, in __init__
self.render_examples()
File "/home/user/.local/lib/python3.10/site-packages/gradio/interface.py", line 827, in render_examples
self.examples_handler = Examples(
File "/home/user/.local/lib/python3.10/site-packages/gradio/helpers.py", line 71, in create_examples
examples_obj.create()
File "/home/user/.local/lib/python3.10/site-packages/gradio/helpers.py", line 296, in create
client_utils.synchronize_async(self.cache)
File "/home/user/.local/lib/python3.10/site-packages/gradio_client/utils.py", line 870, in synchronize_async
return fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, **kwargs) # type: ignore
File "/home/user/.local/lib/python3.10/site-packages/fsspec/asyn.py", line 103, in sync
raise return_result
File "/home/user/.local/lib/python3.10/site-packages/fsspec/asyn.py", line 56, in _runner
result[0] = await coro
File "/home/user/.local/lib/python3.10/site-packages/gradio/helpers.py", line 357, in cache
prediction = await Context.root_block.process_api(
File "/home/user/.local/lib/python3.10/site-packages/gradio/blocks.py", line 1591, in process_api
result = await self.call_function(
File "/home/user/.local/lib/python3.10/site-packages/gradio/blocks.py", line 1176, in call_function
prediction = await anyio.to_thread.run_sync(
File "/home/user/.local/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "/home/user/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2134, in run_sync_in_worker_thread
return await future
File "/home/user/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 851, in run
result = context.run(func, *args)
File "/home/user/.local/lib/python3.10/site-packages/gradio/utils.py", line 678, in wrapper
response = f(*args, **kwargs)
File "/home/user/.local/lib/python3.10/site-packages/gradio/external.py", line 352, in query_huggingface_inference_endpoints
data = fn(*data) # type: ignore
File "/home/user/.local/lib/python3.10/site-packages/huggingface_hub/inference/_client.py", line 1621, in text_to_image
response = self.post(json=payload, model=model, task="text-to-image")
File "/home/user/.local/lib/python3.10/site-packages/huggingface_hub/inference/_client.py", line 240, in post
hf_raise_for_status(response)
File "/home/user/.local/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py", line 329, in hf_raise_for_status
raise BadRequestError(message, response=response) from e

I got the same error using the Inference API provided by HF. The info in the API response doesn't seem to be correct; some keys are missing.

Sign up or log in to comment