Runtime error

Traceback (most recent call last):
  File "/home/user/app/app.py", line 451, in <module>
    global_state = GlobalState()
  File "/home/user/app/app.py", line 188, in __init__
    pipe, frescoProc, controlnet, detector, flow_model, sod_model = get_models(
  File "/home/user/app/app.py", line 91, in get_models
    sod_model.load_state_dict(torch.load(config['sod_path']))
  File "/usr/local/lib/python3.10/site-packages/torch/serialization.py", line 1040, in load
    return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
  File "/usr/local/lib/python3.10/site-packages/torch/serialization.py", line 1268, in _legacy_load
    result = unpickler.load()
  File "/usr/local/lib/python3.10/site-packages/torch/serialization.py", line 1205, in persistent_load
    wrap_storage=restore_location(obj, location),
  File "/usr/local/lib/python3.10/site-packages/torch/serialization.py", line 391, in default_restore_location
    result = fn(storage, location)
  File "/usr/local/lib/python3.10/site-packages/torch/serialization.py", line 266, in _cuda_deserialize
    device = validate_cuda_device(location)
  File "/usr/local/lib/python3.10/site-packages/torch/serialization.py", line 250, in validate_cuda_device
    raise RuntimeError('Attempting to deserialize object on a CUDA '
RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.

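The exception message already names the likely fix: the SOD checkpoint was serialized from CUDA tensors, so on a CPU-only container torch.load has to be told where to map the storages. Below is a minimal sketch of the change around app.py line 91, assuming config['sod_path'] and sod_model behave as shown in the traceback; everything else is illustrative, not the app's actual code.

import torch

# Pick whatever device is actually available in this container.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Remap the GPU-saved storages onto the chosen device while loading,
# as suggested by the RuntimeError above. config['sod_path'] and
# sod_model are the names from the traceback (app.py, get_models).
state_dict = torch.load(config['sod_path'], map_location=device)
sod_model.load_state_dict(state_dict)
sod_model.to(device)

If other checkpoints loaded in get_models() were saved the same way, or if the code later calls .cuda() unconditionally, those spots would presumably need the same guard; that part is an assumption about code not visible in this log.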