Commit bbb9bc0
alessandro trinca tornidor committed
1 Parent(s): 656fa96

feat: add debug logs, update torch == 2.4.0 and torchvision == 0.19.0

Files changed:
- lisa_on_cuda/utils/app_helpers.py +3 -0
- pyproject.toml +4 -4
- requirements.txt +3 -3
lisa_on_cuda/utils/app_helpers.py
CHANGED
@@ -224,8 +224,11 @@ def get_model(args_to_parse, internal_logger: logging = None, inference_decorator
     ) if inference_decorator else prepare_model_vision_tower(
         _model, args_to_parse, torch_dtype
     )
+    internal_logger.debug(f"_model type:{type(_model)} => {_model}.")
+    internal_logger.debug(f"vision_tower type:{type(vision_tower)} => {vision_tower}.")
     # set device to device_map try to avoid CUDA init RuntimeError on ZeroGPU huggingface hardware
     device = device_map if device_map else args_to_parse.local_rank
+    internal_logger.debug(f"device to use with vision tower:{device}, device_map:{device_map}, local_rank:{args_to_parse.local_rank}.")
     vision_tower.to(device=device)
     internal_logger.debug("vision tower loaded, prepare clip image processor...")
     _clip_image_processor = CLIPImageProcessor.from_pretrained(_model.config.vision_tower)
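For context, the new debug lines trace the device-selection step that runs right after the vision tower is prepared: prefer an explicit device_map, otherwise fall back to the process local_rank, so CUDA is not initialized too early on ZeroGPU hardware. The sketch below isolates that pattern; the helper name resolve_vision_tower_device and the stub logger are hypothetical, used only to illustrate the logic in the diff, not the project's actual API.

import logging

def resolve_vision_tower_device(device_map, local_rank, internal_logger=None):
    # Prefer an explicit device_map; otherwise fall back to local_rank,
    # mirroring the choice made in get_model above.
    internal_logger = internal_logger or logging.getLogger(__name__)
    device = device_map if device_map else local_rank
    internal_logger.debug(
        f"device to use with vision tower:{device}, "
        f"device_map:{device_map}, local_rank:{local_rank}."
    )
    return device

# usage sketch: once the device is resolved, move the module onto it
# vision_tower.to(device=resolve_vision_tower_device("cuda", 0))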
pyproject.toml
CHANGED
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "lisa-on-cuda"
-version = "1.4.
+version = "1.4.7"
 description = "LISA (Reasoning Segmentation via Large Language Model) on cuda, now with huggingface ZeroGPU support!"
 authors = ["alessandro trinca tornidor <alessandro@trinca.tornidor.com>"]
 license = "Apache 2.0"
@@ -8,7 +8,7 @@ readme = "README.md"
 
 [metadata]
 name = "lisa-on-cuda"
-version = "1.4.
+version = "1.4.7"
 
 [tool.poetry.urls]
 Source = "https://huggingface.co/spaces/aletrn/lisa-on-cuda/"
@@ -36,8 +36,8 @@ scipy = "^1.14.0"
 sentencepiece = "^0.2.0"
 shortuuid = "^1.0.13"
 spaces = "0.30.2"
-torch = "2.
-torchvision = "0.
+torch = "2.4.0"
+torchvision = "0.19.0"
 tqdm = "^4.66.4"
 transformers-backport = "4.31.2"
 uvicorn = "^0.30.1"
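Since pyproject.toml now pins torch 2.4.0 and torchvision 0.19.0 exactly, a quick runtime assertion can confirm the environment actually resolved to that pair. This is only a sanity-check sketch, not part of the project code; wheel builds may carry local suffixes such as "+cu121", hence the comparison on the release part only.

import torch
import torchvision

# torch wheels may report e.g. "2.4.0+cu121"; compare only the release part.
assert torch.__version__.split("+")[0] == "2.4.0", torch.__version__
assert torchvision.__version__.split("+")[0] == "0.19.0", torchvision.__version__
print(f"torch {torch.__version__}, torchvision {torchvision.__version__}, "
      f"cuda available: {torch.cuda.is_available()}")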
requirements.txt
CHANGED
@@ -5,7 +5,7 @@ markdown2==2.5.0
 nh3==0.2.18
 numpy==1.25.2
 openai==1.45.0
-opencv-python-headless==4.
+opencv-python-headless==4.10.0.84
 packaging
 peft-patched==0.9.3
 pycocotools==2.0.8
@@ -14,8 +14,8 @@ samgis_core==3.0.13
 scipy==1.14.1
 sentencepiece==0.2.0
 shortuuid==1.0.13
-torch==2.
-torchvision==0.
+torch==2.4.0
+torchvision==0.19.0
 tqdm==4.66.5
 transformers-backport==4.31.2
 uvicorn==0.30.6
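The torch and torchvision bumps are applied to both pyproject.toml and requirements.txt, so the two files have to be kept in sync by hand. A small, hypothetical consistency check (assuming Python 3.11+ for the standard-library tomllib and the file layout shown above) could look like this:

import tomllib
from pathlib import Path

def requirement_pins(path: Path) -> dict[str, str]:
    # Parse "name==version" lines from a requirements.txt-style file.
    pins = {}
    for line in path.read_text().splitlines():
        if "==" in line:
            name, version = line.strip().split("==", 1)
            pins[name] = version
    return pins

def poetry_pins(path: Path) -> dict[str, str]:
    # Read exact (non-caret) version strings from [tool.poetry.dependencies].
    data = tomllib.loads(path.read_text())
    deps = data["tool"]["poetry"]["dependencies"]
    return {k: v for k, v in deps.items()
            if isinstance(v, str) and not v.startswith("^")}

reqs = requirement_pins(Path("requirements.txt"))
poetry = poetry_pins(Path("pyproject.toml"))
for package in ("torch", "torchvision"):
    if reqs.get(package) != poetry.get(package):
        print(f"pin mismatch for {package}: "
              f"{reqs.get(package)} vs {poetry.get(package)}")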