runtime error

  [traceback truncated] ...r_thread
    return await future
  File "/home/user/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 851, in run
    result = context.run(func, *args)
  File "/home/user/.local/lib/python3.10/site-packages/gradio/utils.py", line 689, in wrapper
    response = f(*args, **kwargs)
  File "/home/user/.local/lib/python3.10/site-packages/gradio/external.py", line 352, in query_huggingface_inference_endpoints
    data = fn(*data)  # type: ignore
  File "/home/user/.local/lib/python3.10/site-packages/gradio/external_utils.py", line 115, in text_generation_inner
    return input + client.text_generation(input)
  File "/home/user/.local/lib/python3.10/site-packages/huggingface_hub/inference/_client.py", line 1535, in text_generation
    raise_text_generation_error(e)
  File "/home/user/.local/lib/python3.10/site-packages/huggingface_hub/inference/_text_generation.py", line 534, in raise_text_generation_error
    raise http_error
  File "/home/user/.local/lib/python3.10/site-packages/huggingface_hub/inference/_client.py", line 1511, in text_generation
    bytes_output = self.post(json=payload, model=model, task="text-generation", stream=stream)  # type: ignore
  File "/home/user/.local/lib/python3.10/site-packages/huggingface_hub/inference/_client.py", line 240, in post
    hf_raise_for_status(response)
  File "/home/user/.local/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py", line 333, in hf_raise_for_status
    raise HfHubHTTPError(str(e), response=response) from e
huggingface_hub.utils._errors.HfHubHTTPError: 500 Server Error: Internal Server Error for url: https://api-inference.huggingface.co/models/BlackSamorez/Llama-2-7b-AQLM-2Bit-2x8-hf (Request ID: X1Vw-3XkNJpDgiU_RP2eI)

The repository for BlackSamorez/Llama-2-7b-AQLM-2Bit-2x8-hf contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/BlackSamorez/Llama-2-7b-AQLM-2Bit-2x8-hf. Please pass the argument `trust_remote_code=True` to allow custom code to be run.

Container logs:

Fetching error logs...