Runtime error
Traceback (most recent call last):
  File "app.py", line 19, in <module>
    model = torch.load('fruit30_pytorch_20220814.pth')
  File "/home/user/.local/lib/python3.8/site-packages/torch/serialization.py", line 607, in load
    return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
  File "/home/user/.local/lib/python3.8/site-packages/torch/serialization.py", line 882, in _load
    result = unpickler.load()
  File "/home/user/.local/lib/python3.8/site-packages/torch/serialization.py", line 857, in persistent_load
    load_tensor(data_type, size, key, _maybe_decode_ascii(location))
  File "/home/user/.local/lib/python3.8/site-packages/torch/serialization.py", line 846, in load_tensor
    loaded_storages[key] = restore_location(storage, location)
  File "/home/user/.local/lib/python3.8/site-packages/torch/serialization.py", line 175, in default_restore_location
    result = fn(storage, location)
  File "/home/user/.local/lib/python3.8/site-packages/torch/serialization.py", line 151, in _cuda_deserialize
    device = validate_cuda_device(location)
  File "/home/user/.local/lib/python3.8/site-packages/torch/serialization.py", line 135, in validate_cuda_device
    raise RuntimeError('Attempting to deserialize object on a CUDA '
RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.
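As the error message says, the checkpoint fruit30_pytorch_20220814.pth was saved on a GPU, but the container has no CUDA device, so torch.load needs map_location to remap the stored tensors onto the CPU. A minimal sketch of the fix for line 19 of app.py, assuming the file holds a fully pickled model rather than a state_dict:

    import torch

    # Remap CUDA-saved storages to the CPU, since torch.cuda.is_available() is False here
    device = torch.device('cpu')
    model = torch.load('fruit30_pytorch_20220814.pth', map_location=device)
    model.eval()  # switch to inference mode before serving predictions

If the file instead contains a state_dict, load it the same way with map_location and pass the result to model.load_state_dict() on a freshly constructed model.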