andy-wyx committed · Commit 674635a · 1 Parent(s): 432e6ee

feat: enable cpu mode, set local read token

inference_resnet.py CHANGED
@@ -1,6 +1,10 @@
 import tensorflow as tf
 gpu_devices = tf.config.experimental.list_physical_devices('GPU')
-tf.config.experimental.set_memory_growth(gpu_devices[0], True)
+if gpu_devices:
+    tf.config.experimental.set_memory_growth(gpu_devices[0], True)
+else:
+    print(f"TensorFlow device: {gpu_devices}")
+
 from keras.applications import resnet
 import tensorflow.keras.layers as L
 import os
@@ -15,7 +19,10 @@ import numpy as np
 if not os.path.exists('model_classification'):
 
     REPO_ID='Serrelab/fossil_classification_models'
-    snapshot_download(repo_id=REPO_ID, token=os.environ.get('READ_TOKEN'),repo_type='model',local_dir='model_classification')
+    token = os.environ.get('READ_TOKEN')
+    if token is None:
+        print("warning! A read token in env variables is needed for authentication.")
+    snapshot_download(repo_id=REPO_ID, token=token,repo_type='model',local_dir='model_classification')
 
 
 def get_model(base_arch='Nasnet',weights='imagenet',input_shape=(600,600,3),classes=64500):
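
For reference, a minimal standalone sketch of the pattern these two hunks introduce: only call the GPU-specific set_memory_growth when a GPU is actually visible (so the script also starts on CPU-only machines), and read the Hugging Face token from the READ_TOKEN environment variable, warning when it is missing. The repo id and local_dir are taken from the diff; the printed messages are paraphrased.

# Sketch of the CPU-safe startup and token lookup shown in the diff above.
import os

import tensorflow as tf
from huggingface_hub import snapshot_download

gpu_devices = tf.config.experimental.list_physical_devices('GPU')
if gpu_devices:
    # Only valid when at least one GPU is present; indexing [0] would raise otherwise.
    tf.config.experimental.set_memory_growth(gpu_devices[0], True)
else:
    print("No GPU detected; TensorFlow will run on CPU.")

if not os.path.exists('model_classification'):
    REPO_ID = 'Serrelab/fossil_classification_models'
    token = os.environ.get('READ_TOKEN')  # e.g. export READ_TOKEN=hf_... before launching
    if token is None:
        print("Warning: READ_TOKEN is not set; downloading a private repo will fail.")
    snapshot_download(repo_id=REPO_ID, token=token,
                      repo_type='model', local_dir='model_classification')
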
inference_sam.py CHANGED
@@ -1,8 +1,19 @@
 import torch
-torch.cuda.set_per_process_memory_fraction(0.3, device=0)
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+if torch.cuda.is_available():
+    device = "cuda"
+    torch.cuda.set_per_process_memory_fraction(0.3, device=device)
+else:
+    device = "cpu"
+    print(f"Torch device: {device}")
+
 import tensorflow as tf
 gpu_devices = tf.config.experimental.list_physical_devices('GPU')
-tf.config.experimental.set_memory_growth(gpu_devices[0], True)
+if gpu_devices:
+    tf.config.experimental.set_memory_growth(gpu_devices[0], True)
+else:
+    print(f"TensorFlow device: {gpu_devices}")
 
 from segment_anything import SamPredictor, sam_model_registry
 import matplotlib.pyplot as plt
@@ -14,10 +25,13 @@ from huggingface_hub import snapshot_download
 
 if not os.path.exists('model'):
     REPO_ID='Serrelab/SAM_Leaves'
-    snapshot_download(repo_id=REPO_ID, token=os.environ.get('READ_TOKEN'),repo_type='model',local_dir='model')
+    token = os.environ.get('READ_TOKEN')
+    if token is None:
+        print("warning! A read token in env variables is needed for authentication.")
+    snapshot_download(repo_id=REPO_ID, token=token,repo_type='model',local_dir='model')
 
-sam = sam_model_registry["default"]("/home/irodri15/Documents/Projects/Fossils/fossil_app/model/sam_02-06_dice_mse_0.pth")
-sam.cuda()
+sam = sam_model_registry["default"]("/model/sam_02-06_dice_mse_0.pth")
+sam.to(device) #sam.cuda()
 predictor = SamPredictor(sam)
 
 
@@ -40,7 +54,7 @@ def preprocess(img):
     img_preprocess = predictor.transform.apply_image(img)
     intermediate_shape = img_preprocess.shape
 
-    img_preprocess = torch.as_tensor(img_preprocess).cuda()
+    img_preprocess = torch.as_tensor(img_preprocess).to(device) #torch.as_tensor(img_preprocess).cuda()
     img_preprocess = img_preprocess.permute(2, 0, 1).contiguous()[None, :, :, :]
 
     img_preprocess = sam.preprocess(img_preprocess)
@@ -155,7 +169,7 @@ def one_step_inference(x):
     mask = F.interpolate(low_res_masks, (1024, 1024))[:, :, :intermediate_shape[0], :intermediate_shape[1]]
     mask = F.interpolate(mask, (original_size[0], original_size[1]))
 
-    return mask
+    return mask.to(device) #mask
 
 def segmentation_sam(x,SIZE=384):
 
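The same CPU fallback on the PyTorch side, as a self-contained sketch: pick the device once, cap per-process GPU memory only when CUDA is available, and move models and tensors with .to(device) instead of the CUDA-only .cuda(). The 0.3 memory fraction comes from the diff; the zero tensor below is only a stand-in for the SAM model and its inputs.

# Sketch of the device selection introduced in inference_sam.py.
import torch

if torch.cuda.is_available():
    device = torch.device("cuda:0")
    # Keep this process to roughly 30% of GPU 0's memory, as in the commit.
    torch.cuda.set_per_process_memory_fraction(0.3, device=device)
else:
    device = torch.device("cpu")
print(f"Torch device: {device}")

# Models and tensors then follow the same device, e.g.
x = torch.zeros(1, 3, 1024, 1024).to(device)  # stand-in for the SAM input batch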
 
model/.gitattributes ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
model/README.md ADDED
@@ -0,0 +1,37 @@
+---
+license: bigscience-openrail-m
+language:
+- en
+pipeline_tag: image-segmentation
+---
+# Model Card for Model ID
+
+<!-- Provide a quick summary of what the model is/does. -->
+
+This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1).
+
+## Model Details
+
+SAM trained with MSE and finetuned for leaves segmentation
+
+### Model Description
+
+
+
+
+- **Developed by:** Thomas Fel and I. Rodriguez
+
+- **Model type:** Semantic Segmentation
+- **Language(s) (NLP):** [More Information Needed]
+
+
+### Model Sources [optional]
+
+<!-- Provide the basic links for the model. -->
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+
+
model/sam_02-06_dice_mse_0.pth ADDED
@@ -0,0 +1 @@
+../../../../.cache/huggingface/hub/models--Serrelab--SAM_Leaves/blobs/920cc0f6b6d80b8dcd771cb39bdec6c6be9c44cc6fc95314ef20025b71e55a73
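The committed .pth content is a relative path into the local Hugging Face cache, i.e. a symlink target, which is likely what snapshot_download(..., local_dir='model') produced for large files in the huggingface_hub version used here. A hedged sketch of forcing a real copy instead; local_dir_use_symlinks is the historical flag for this and is deprecated (and ignored) in newer huggingface_hub releases, so treat this as an assumption about the installed version.

# Sketch: download the SAM checkpoint as a real file rather than a cache symlink.
# Assumes an older huggingface_hub where local_dir_use_symlinks is still honored.
import os
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id='Serrelab/SAM_Leaves',
    token=os.environ.get('READ_TOKEN'),
    repo_type='model',
    local_dir='model',
    local_dir_use_symlinks=False,  # copy blobs into local_dir instead of symlinking to the cache
)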