Spaces:
Runtime error
on a10g
runtime error
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/gradio/helpers.py", line 308, in cache
    prediction = await Context.root_block.process_api(
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/gradio/blocks.py", line 1017, in process_api
    result = await self.call_function(
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/gradio/blocks.py", line 835, in call_function
    prediction = await anyio.to_thread.run_sync(
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/anyio/to_thread.py", line 31, in run_sync
    return await get_asynclib().run_sync_in_worker_thread(
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
    return await future
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 867, in run
    result = context.run(func, *args)
File "/home/user/app/inference.py", line 87, in run
    self.load_pipe(model_id)
File "/home/user/app/inference.py", line 58, in load_pipe
    base_model_id = self.get_base_model_info(model_id, self.hf_token)
File "/home/user/app/inference.py", line 52, in get_base_model_info
    card = InferencePipeline.get_model_card(model_id, hf_token)
File "/home/user/app/inference.py", line 48, in get_model_card
    return ModelCard.load(card_path, token=hf_token)
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/huggingface_hub/repocard.py", line 172, in load
    card_path = hf_hub_download(
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 124, in _inner_fn
    return fn(*args, **kwargs)
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/huggingface_hub/file_download.py", line 1211, in hf_hub_download
    raise LocalEntryNotFoundError(
huggingface_hub.utils._errors.LocalEntryNotFoundError: Connection error, and we cannot find the requested files in the disk cache. Please try again or make sure your Internet connection is on.
Container logs:
==========
== CUDA ==
==========
CUDA Version 11.7.1
Container image Copyright (c) 2016-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
This container image and its contents are governed by the NVIDIA Deep Learning Container License.
By pulling and using the container, you accept the terms and conditions of this license:
https://developer.nvidia.com/ngc/nvidia-deep-learning-container-license
A copy of this license is made available in this container at /NGC-DL-CONTAINER-LICENSE for your convenience.
Caching examples at: '/home/user/app/gradio_cached_examples/25'
'HTTPSConnectionPool(host='huggingface.co', port=443): Max retries exceeded with url: /Tune-A-Video-library/a-man-is-surfing/resolve/main/README.md (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f3a5b3a2e90>, 'Connection to huggingface.co timed out. (connect timeout=10)'))' thrown while requesting HEAD https://huggingface.co/Tune-A-Video-library/a-man-is-surfing/resolve/main/README.md
Traceback (most recent call last):
File "/home/user/app/app.py", line 185, in <module>
gr.Examples(examples=examples,
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/gradio/helpers.py", line 69, in create_examples
utils.synchronize_async(examples_obj.create)
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/gradio/utils.py", line 420, in synchronize_async
return fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, **kwargs)
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/fsspec/asyn.py", line 99, in sync
raise return_result
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/fsspec/asyn.py", line 54, in _runner
result[0] = await coro
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/gradio/helpers.py", line 273, in create
await self.cache()
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/gradio/helpers.py", line 308, in cache
prediction = await Context.root_block.process_api(
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/gradio/blocks.py", line 1017, in process_api
result = await self.call_function(
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/gradio/blocks.py", line 835, in call_function
prediction = await anyio.to_thread.run_sync(
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/anyio/to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 867, in run
result = context.run(func, *args)
File "/home/user/app/inference.py", line 87, in run
self.load_pipe(model_id)
File "/home/user/app/inference.py", line 58, in load_pipe
base_model_id = self.get_base_model_info(model_id, self.hf_token)
File "/home/user/app/inference.py", line 52, in get_base_model_info
card = InferencePipeline.get_model_card(model_id, hf_token)
File "/home/user/app/inference.py", line 48, in get_model_card
return ModelCard.load(card_path, token=hf_token)
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/huggingface_hub/repocard.py", line 172, in load
card_path = hf_hub_download(
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 124, in _inner_fn
return fn(*args, **kwargs)
File "/home/user/.pyenv/versions/3.10.9/lib/python3.10/site-packages/huggingface_hub/file_download.py", line 1211, in hf_hub_download
raise LocalEntryNotFoundError(
huggingface_hub.utils._errors.LocalEntryNotFoundError: Connection error, and we cannot find the requested files in the disk cache. Please try again or make sure your Internet connection is on.