bswift committed on
Commit
c19f6a4
1 Parent(s): 60288c3

Upload 13 files

Browse files
README.md CHANGED
@@ -1,3 +1,32 @@
1
- ---
2
- license: cc-by-nc-sa-4.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ch-observations-12-12-2023-200530
2
+ ## Description
3
+ Fine-tuned vit_large_patch16 model for ch-observations-12-12-2023-200530
4
+
5
+ ## Use Cases
6
+ - Use case 1
7
+ - Use case 2
8
+
9
+ ## Limitations
10
+ - Limitation 1
11
+ - Limitation 2
12
+
13
+ ## Ethics
14
+ - Ethics 1
15
+ - Ethics 2
16
+
17
+ ## Training Data
18
+ 420 images from 9 classes
19
+
20
+ ## Training Procedure
21
+ Fine-tuned for 1 epoch with batch size 10 and base learning rate 0.005
22
+
23
+ ## Intended Use
24
+ Intended for use with ch-observations
25
+
26
+ ## Authors
27
+ - Author 1
28
+ - Author 2
29
+
30
+ ## References
31
+ - Reference 1
32
+ - Reference 2
_metrics_test.csv ADDED
@@ -0,0 +1 @@
 
 
1
+ 0.824468085106383,0.125,0.875,nan,0.46879739269445153,0.13078102709831613,nan,0.0,2.0616 (2.1209)
_metrics_val.csv ADDED
@@ -0,0 +1 @@
 
 
1
+ 0.8260869565217391,0.125,0.875,nan,0.5113370591314772,0.14641543657977882,nan,0.0,2.0563 (2.1143)
checkpoint-best.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e93518293d54e2ea7333758202c8dc720d430c7c644ae1642cdca9103aad50a
3
+ size 3640773295
config.json ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "VisionTransformerForImageClassification"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.0,
6
+ "encoder_stride": 16,
7
+ "hidden_act": "gelu",
8
+ "hidden_dropout_prob": 0.0,
9
+ "hidden_size": 768,
10
+ "id2label": {
11
+ "0": "LABEL_0",
12
+ "1": "LABEL_1",
13
+ "2": "LABEL_2",
14
+ "3": "LABEL_3",
15
+ "4": "LABEL_4",
16
+ "5": "LABEL_5",
17
+ "6": "LABEL_6",
18
+ "7": "LABEL_7",
19
+ "8": "LABEL_8"
20
+ },
21
+ "image_size": 256,
22
+ "initializer_range": 0.02,
23
+ "intermediate_size": 3072,
24
+ "label2id": {
25
+ "LABEL_0": 0,
26
+ "LABEL_1": 1,
27
+ "LABEL_2": 2,
28
+ "LABEL_3": 3,
29
+ "LABEL_4": 4,
30
+ "LABEL_5": 5,
31
+ "LABEL_6": 6,
32
+ "LABEL_7": 7,
33
+ "LABEL_8": 8
34
+ },
35
+ "layer_norm_eps": 1e-12,
36
+ "model_type": "vit",
37
+ "num_attention_heads": 12,
38
+ "num_channels": 3,
39
+ "num_hidden_layers": 12,
40
+ "patch_size": 16,
41
+ "qkv_bias": true,
42
+ "torch_dtype": "float32",
43
+ "transformers_version": "4.30.2"
44
+ }
config_vit.json ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "VisionTransformerForImageClassification"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.0,
6
+ "encoder_stride": 16,
7
+ "hidden_act": "gelu",
8
+ "hidden_dropout_prob": 0.0,
9
+ "hidden_size": 768,
10
+ "id2label": {
11
+ "0": "LABEL_0",
12
+ "1": "LABEL_1",
13
+ "2": "LABEL_2",
14
+ "3": "LABEL_3",
15
+ "4": "LABEL_4",
16
+ "5": "LABEL_5",
17
+ "6": "LABEL_6",
18
+ "7": "LABEL_7",
19
+ "8": "LABEL_8"
20
+ },
21
+ "image_size": 256,
22
+ "initializer_range": 0.02,
23
+ "intermediate_size": 3072,
24
+ "label2id": {
25
+ "LABEL_0": 0,
26
+ "LABEL_1": 1,
27
+ "LABEL_2": 2,
28
+ "LABEL_3": 3,
29
+ "LABEL_4": 4,
30
+ "LABEL_5": 5,
31
+ "LABEL_6": 6,
32
+ "LABEL_7": 7,
33
+ "LABEL_8": 8
34
+ },
35
+ "layer_norm_eps": 1e-12,
36
+ "model_type": "vit",
37
+ "num_attention_heads": 12,
38
+ "num_channels": 3,
39
+ "num_hidden_layers": 12,
40
+ "patch_size": 16,
41
+ "qkv_bias": true,
42
+ "torch_dtype": "float32",
43
+ "transformers_version": "4.30.2"
44
+ }
confusion_matrix_test.jpg ADDED
dataset_info.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"num_classes": 9, "classes": ["cotton wool spot", "db heme", "dr wo csme", "exudate", "large cd ratio", "microaneurysm", "normal", "npdr mild wo csme", "npdr moderate wo csme"], "num_training_images": 420}
model_card.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model_name": "ch-observations-12-12-2023-200530", "model_type": "vit", "architecture": "vit_large_patch16", "description": "Fine-tuned vit_large_patch16 model for ch-observations-12-12-2023-200530", "use_cases": ["Use case 1", "Use case 2"], "limitations": ["Limitation 1", "Limitation 2"], "ethics": ["Ethics 1", "Ethics 2"], "training_data": "420 images from 9 classes", "training_procedure": "Fine-tuned for 1 epochs with batch size 10 and base learning rate 0.005", "intended_use": "Intended for use with ch-observations", "authors": ["Author 1", "Author 2"], "references": ["Reference 1", "Reference 2"]}
preprocessor_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"preprocess": {"resize": [256, 256], "normalize": {"mean": [0.485, 0.456, 0.406], "std": [0.229, 0.224, 0.225]}}}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8858bda82f0d471403c350109077d68c28335ea3c63569f07ba68ec328411aa8
3
+ size 1217401839
requirements.txt ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ torch==1.8.1+cu111
2
+ timm==0.3.2
3
+ torchvision==0.9.1+cu111
4
+ torchaudio==0.8.1
5
+ opencv-python>=4.5.3.56
6
+ pandas>=0.25.3
7
+ Pillow>=8.3.1
8
+ protobuf>=3.17.3
9
+ pycm>=3.2
10
+ pydicom>=2.3.0
11
+ scikit-image>=0.17.2
12
+ scikit-learn>=0.24.2
13
+ scipy>=1.5.4
14
+ tensorboard>=2.6.0
15
+ tensorboard-data-server>=0.6.1
16
+ tensorboard-plugin-wit>=1.8.0
17
+ tqdm>=4.62.1
18
+ einops>=0.3.0
19
+ h5py>=2.8.0
20
+ imageio>=2.9.0
21
+ matplotlib>=3.3.2
22
+ tqdm>=4.51.0
23
+ transformers>=3.5.1
24
+ utils>=1.0.1
25
+ Pygments>=2.7.4
26
+ pytorch-msssim>=1.0.0
27
+ toml
training_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"batch_size": 10, "epochs": 1, "model": "vit_large_patch16", "base_learning_rate": 0.005, "layer_decay": 0.65, "weight_decay": 0.05, "drop_path_rate": 0.2, "input_size": 256, "num_classes": 9, "task": "./models/ch-observations-12-12-2023-200530/", "output_dir": "X:\\code\\UIaEYE\\data\\Cherry Health\\images\\segmented\\dataset-clean and trimmed-tags-12-02-2023-022946\\observations\\_datasets\\_dataset_12-10-2023-161414\\training_data\\outputs\\_artifacts_12-12-2023-200530", "world_size": 1, "finetune": "D:\\\\data\\\\RetFound\\\\weights\\\\RETFound_cfp_weights.pth", "rmbg": true}