runtime error

gradio_client/utils.py", line 855, in synchronize_async
    return fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, **kwargs)  # type: ignore
  File "/usr/local/lib/python3.10/site-packages/fsspec/asyn.py", line 103, in sync
    raise return_result
  File "/usr/local/lib/python3.10/site-packages/fsspec/asyn.py", line 56, in _runner
    result[0] = await coro
  File "/usr/local/lib/python3.10/site-packages/gradio/helpers.py", line 513, in cache
    prediction = await Context.root_block.process_api(
  File "/usr/local/lib/python3.10/site-packages/gradio/blocks.py", line 1923, in process_api
    result = await self.call_function(
  File "/usr/local/lib/python3.10/site-packages/gradio/blocks.py", line 1506, in call_function
    prediction = await fn(*processed_input)
  File "/usr/local/lib/python3.10/site-packages/gradio/utils.py", line 785, in async_wrapper
    response = await f(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/gradio/chat_interface.py", line 788, in _examples_fn
    response = await anyio.to_thread.run_sync(
  File "/usr/local/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
    return await get_async_backend().run_sync_in_worker_thread(
  File "/usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2177, in run_sync_in_worker_thread
    return await future
  File "/usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 859, in run
    result = context.run(func, *args)
  File "/home/user/app/app.py", line 162, in chatbot_response
    response, generation_time, gpu_memory = process_dialog(message, history)
  File "/home/user/app/app.py", line 136, in process_dialog
    with TorchTracemalloc() as tracemalloc:
  File "/home/user/app/app.py", line 68, in __enter__
    torch.cuda.reset_peak_memory_stats()
  File "/usr/local/lib/python3.10/site-packages/torch/cuda/memory.py", line 309, in reset_peak_memory_stats
    return torch._C._cuda_resetPeakMemoryStats(device)
RuntimeError: invalid argument to reset_peak_memory_stats
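For context: the traceback shows the failure happens inside Gradio's example caching path (helpers.py cache -> chat_interface _examples_fn), i.e. at Space build/startup time, when app.py's TorchTracemalloc context manager calls torch.cuda.reset_peak_memory_stats(). PyTorch typically raises "invalid argument to reset_peak_memory_stats" when the CUDA allocator for the target device has not been initialized, for example when no GPU is attached to the container or no CUDA work has run yet. Below is a minimal, hypothetical sketch (not the actual app.py code) of a guarded version of such a context manager that no-ops instead of crashing when CUDA is unavailable:

import gc
import time

import torch


class TorchTracemalloc:
    """Hypothetical GPU-memory tracing context manager, guarded against
    environments where CUDA is unavailable or not yet initialized."""

    def __enter__(self):
        self.begin = time.time()
        # reset_peak_memory_stats() raises "invalid argument ..." when there is
        # no initialized CUDA device, so only call it when CUDA is usable.
        self.cuda_ok = torch.cuda.is_available()
        if self.cuda_ok:
            torch.cuda.empty_cache()
            torch.cuda.reset_peak_memory_stats()
        return self

    def __exit__(self, *exc):
        self.elapsed = time.time() - self.begin
        # Report 0 bytes of peak GPU memory when running without a GPU.
        self.peak = torch.cuda.max_memory_allocated() if self.cuda_ok else 0
        gc.collect()
        return False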
