runtime error

.py", line 486, in cache
    prediction = await Context.root_block.process_api(
  File "/usr/local/lib/python3.10/site-packages/gradio/blocks.py", line 1908, in process_api
    result = await self.call_function(
  File "/usr/local/lib/python3.10/site-packages/gradio/blocks.py", line 1483, in call_function
    prediction = await fn(*processed_input)
  File "/usr/local/lib/python3.10/site-packages/gradio/utils.py", line 775, in async_wrapper
    response = await f(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/gradio/chat_interface.py", line 622, in _examples_fn
    response = await anyio.to_thread.run_sync(
  File "/usr/local/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
    return await get_async_backend().run_sync_in_worker_thread(
  File "/usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2144, in run_sync_in_worker_thread
    return await future
  File "/usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 851, in run
    result = context.run(func, *args)
  File "<string>", line 62, in <lambda>
  File "<string>", line 30, in respond
  File "/usr/local/lib/python3.10/site-packages/huggingface_hub/inference/_client.py", line 575, in chat_completion
    return self.chat_completion(
  File "/usr/local/lib/python3.10/site-packages/huggingface_hub/inference/_client.py", line 622, in chat_completion
    prompt = render_chat_prompt(model_id=model_id, token=self.token, messages=messages)
  File "/usr/local/lib/python3.10/site-packages/huggingface_hub/inference/_templating.py", line 44, in render_chat_prompt
    template = _fetch_and_compile_template(model_id=model_id, token=token)
  File "/usr/local/lib/python3.10/site-packages/huggingface_hub/inference/_templating.py", line 75, in _fetch_and_compile_template
    raise TemplateError(f"Cannot render chat template: model '{model_id}' not found.") from e
huggingface_hub.inference._templating.TemplateError: Cannot render chat template: model 'CohereForAI/c4ai-command-r-plus' not found.

Container logs:

Fetching error logs...