Yu Hung Tam committed
Commit 0186575 • 1 Parent(s): 1058d1a

first commit

Pretrained_vit_feature_extractor_RDDV2AMC.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f1cca05ced71d79538362045c346a0a223fd53057a45770798a83324a99c006b
+oid sha256:15852098f6b673a52c5dfba214406c21d66039b96e95eedf8bc06b7f03fccc82
 size 343273361
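Only the Git LFS pointer changes here: the checkpoint itself lives in LFS storage, and the pointer records its SHA-256 and size. The diff does not show how the weights are consumed; the sketch below is an assumption about how app.py might load this state dict into the ViT returned by create_vit_model. The file name comes from this commit; the num_classes value and map_location choice are assumptions.

import torch

from model import create_vit_model

# Hypothetical loading step (not shown in this diff): restore the fine-tuned
# weights tracked by the LFS pointer above into the ViT feature extractor.
vit, vit_transforms = create_vit_model(num_classes=7)  # 7 is model.py's default
vit.load_state_dict(
    torch.load(
        "Pretrained_vit_feature_extractor_RDDV2AMC.pth",
        map_location=torch.device("cpu"),  # assumes CPU-only Space hardware
    )
)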
app.py CHANGED
@@ -13,7 +13,7 @@ class_names = ["D00-Longitudinal Crack", "D10-Transverse Crack", "D20-Aligator C
 
 ### 2. Model and transforms preparation ###
 
-# Create EffNetB2 model
+# Create ViT model
 vit, vit_transforms = create_vit_model(
     num_classes=len(class_names), # len(class_names) would also work
 )
@@ -58,7 +58,7 @@ def predict(img) -> Tuple[Dict, float]:
 # Create title, description and article strings
 title = "Road Damages Detection 🚧"
 description = "An ViT feature extractor computer vision model to classify images of common road damages."
-article = "Created at [Road Damages Detection](https:/xxx/)."
+article = "Created at https://huggingface.co/spaces/erictam/Road-Damages-Detection_Multi-Class"
 
 # Create examples list from "examples/" directory
 example_list = [["examples/" + example] for example in os.listdir("examples")]
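The hunks above only touch a comment and the article string; the predict function and the Gradio launch code sit outside them. A minimal sketch of how these pieces typically fit together in a Gradio Space follows. The gr.Interface arguments and output components are assumptions, not part of this commit.

import gradio as gr

# Hypothetical assembly (outside the hunks shown above): wire the predict
# function, metadata strings, and example images into a Gradio demo.
demo = gr.Interface(
    fn=predict,                    # defined earlier in app.py
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Label(num_top_classes=3, label="Predictions"),
        gr.Number(label="Prediction time (s)"),
    ],
    examples=example_list,
    title=title,
    description=description,
    article=article,
)

demo.launch()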
examples/IMG_1444 2.jpg ADDED
examples/Japan_000233_jpg.rf.f544578c6f82462d423ff40d1a4079e6.jpg ADDED
examples/Japan_000262_jpg.rf.8c5c2f81d5ea6b6c38ef17260c3d62f1.jpg ADDED
examples/Japan_000319_jpg.rf.006cf119750491d658c1697814b26573.jpg ADDED
examples/Japan_000330_jpg.rf.96d8c3368a5a04b70891efe71c5490ed.jpg ADDED
examples/Japan_000424_jpg.rf.aaa47abaa5ba7e95f46c68c4385de7e4.jpg ADDED
examples/Japan_002731_jpg.rf.1ec428cb3bf571a6bfa88bad23fbe9dc.jpg ADDED
model.py CHANGED
@@ -28,9 +28,7 @@ def create_vit_model(num_classes:int=7,
 
     # Change classifier head with random seed for reproducibility
     torch.manual_seed(seed)
-    model.classifier = nn.Sequential(
-        nn.Dropout(p=0.3, inplace=True),
-        nn.Linear(in_features=768, out_features=num_classes),
-    )
-
+    model.heads = nn.Sequential(nn.Dropout(p=0.3, inplace=True),
+                                nn.Linear(in_features=768, # keep this the same as original model
+                                          out_features=num_classes)) # update to reflect target number of classes
 
     return model, transforms
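This hunk moves the new classification head from model.classifier to model.heads, which is the attribute torchvision's VisionTransformer actually exposes. For context, here is a sketch of what the surrounding create_vit_model function plausibly looks like. The vit_b_16 backbone, the weights enum, and the parameter freezing are assumptions inferred from the 768-dim head and the "feature extractor" naming, not lines from this commit.

import torch
import torchvision
from torch import nn

def create_vit_model(num_classes: int = 7, seed: int = 42):
    """Hypothetical reconstruction of model.py's helper: returns a ViT-B/16
    feature extractor with a fresh classification head, plus its transforms."""
    weights = torchvision.models.ViT_B_16_Weights.DEFAULT
    transforms = weights.transforms()
    model = torchvision.models.vit_b_16(weights=weights)

    # Freeze the backbone so only the new head is trained (feature extraction).
    for param in model.parameters():
        param.requires_grad = False

    # Change classifier head with random seed for reproducibility
    torch.manual_seed(seed)
    model.heads = nn.Sequential(nn.Dropout(p=0.3, inplace=True),
                                nn.Linear(in_features=768,  # ViT-B/16 embedding size
                                          out_features=num_classes))

    return model, transforms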