Colby committed on
Commit
3bec113
Parent: 2ff9c3d

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,32 @@
+ 
+ ---
+ tags:
+ - autotrain
+ - image-classification
+ widget:
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+   example_title: Tiger
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
+   example_title: Teapot
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
+   example_title: Palace
+ datasets:
+ - Colby/autotrain-data-sdxl-detection
+ ---
+ 
+ # Model Trained Using AutoTrain
+ 
+ - Problem type: Image Classification
+ 
+ ## Validation Metrics
+ loss: 0.08717025071382523
+ 
+ f1: 0.9732620320855615
+ 
+ precision: 0.994535519125683
+ 
+ recall: 0.9528795811518325
+ 
+ auc: 0.9980461893059392
+ 
+ accuracy: 0.9812734082397003
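The card above reports metrics but no usage snippet. A minimal inference sketch (an addition, not part of this commit): it assumes the weights are published under the `repo_id` recorded in training_params.json, `Colby/sdxl-detection`, and that an `image.jpg` exists locally.

```python
from transformers import pipeline

# Image-classification pipeline; the labels come from config.json's id2label
# mapping ("artificial" vs. "human").
detector = pipeline("image-classification", model="Colby/sdxl-detection")
print(detector("image.jpg"))  # e.g. [{'label': 'artificial', 'score': ...}, ...]
```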
checkpoint-801/config.json ADDED
@@ -0,0 +1,64 @@
+ {
+   "_name_or_path": "umm-maybe/AI-image-detector",
+   "_num_labels": 2,
+   "architectures": [
+     "SwinForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "depths": [
+     2,
+     2,
+     18,
+     2
+   ],
+   "drop_path_rate": 0.1,
+   "embed_dim": 128,
+   "encoder_stride": 32,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "artificial",
+     "1": "human"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "label2id": {
+     "artificial": 0,
+     "human": 1
+   },
+   "layer_norm_eps": 1e-05,
+   "max_length": 128,
+   "mlp_ratio": 4.0,
+   "model_type": "swin",
+   "num_channels": 3,
+   "num_heads": [
+     4,
+     8,
+     16,
+     32
+   ],
+   "num_layers": 4,
+   "out_features": [
+     "stage4"
+   ],
+   "out_indices": [
+     4
+   ],
+   "padding": "max_length",
+   "patch_size": 4,
+   "path_norm": true,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "stage_names": [
+     "stem",
+     "stage1",
+     "stage2",
+     "stage3",
+     "stage4"
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.1",
+   "use_absolute_embeddings": false,
+   "window_size": 7
+ }
checkpoint-801/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8809f67fbbf4b455b13aaebf2bfc90192e7c6c881fd8efce0eb51fba9a06e5c
+ size 347498816
checkpoint-801/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:daad0983551fa9b570f1c4661cab0724f4648f791c91721035497159d7cda112
+ size 694317645
checkpoint-801/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15c6904e663600c82d51028f0065ee58d3a7185294ffde1645e768dff690018a
+ size 14244
checkpoint-801/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d67a04a34872c2a6db1e1467676af5482d45f0631e6c4fb79faa3f48012940bc
+ size 1064
checkpoint-801/trainer_state.json ADDED
@@ -0,0 +1,426 @@
+ {
+   "best_metric": 0.08717025071382523,
+   "best_model_checkpoint": "/tmp/model/checkpoint-801",
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 801,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.05,
+       "learning_rate": 6.172839506172839e-06,
+       "loss": 2.4158,
+       "step": 13
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 1.419753086419753e-05,
+       "loss": 1.3701,
+       "step": 26
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 2.2222222222222223e-05,
+       "loss": 0.6874,
+       "step": 39
+     },
+     {
+       "epoch": 0.19,
+       "learning_rate": 2.962962962962963e-05,
+       "loss": 0.6272,
+       "step": 52
+     },
+     {
+       "epoch": 0.24,
+       "learning_rate": 3.7654320987654326e-05,
+       "loss": 0.4387,
+       "step": 65
+     },
+     {
+       "epoch": 0.29,
+       "learning_rate": 4.567901234567901e-05,
+       "loss": 0.3961,
+       "step": 78
+     },
+     {
+       "epoch": 0.34,
+       "learning_rate": 4.958333333333334e-05,
+       "loss": 0.3804,
+       "step": 91
+     },
+     {
+       "epoch": 0.39,
+       "learning_rate": 4.8680555555555554e-05,
+       "loss": 0.3121,
+       "step": 104
+     },
+     {
+       "epoch": 0.44,
+       "learning_rate": 4.7777777777777784e-05,
+       "loss": 0.4641,
+       "step": 117
+     },
+     {
+       "epoch": 0.49,
+       "learning_rate": 4.6875e-05,
+       "loss": 0.4265,
+       "step": 130
+     },
+     {
+       "epoch": 0.54,
+       "learning_rate": 4.5972222222222225e-05,
+       "loss": 0.2297,
+       "step": 143
+     },
+     {
+       "epoch": 0.58,
+       "learning_rate": 4.506944444444445e-05,
+       "loss": 0.3022,
+       "step": 156
+     },
+     {
+       "epoch": 0.63,
+       "learning_rate": 4.4166666666666665e-05,
+       "loss": 0.2468,
+       "step": 169
+     },
+     {
+       "epoch": 0.68,
+       "learning_rate": 4.3263888888888895e-05,
+       "loss": 0.1989,
+       "step": 182
+     },
+     {
+       "epoch": 0.73,
+       "learning_rate": 4.236111111111111e-05,
+       "loss": 0.1909,
+       "step": 195
+     },
+     {
+       "epoch": 0.78,
+       "learning_rate": 4.1458333333333336e-05,
+       "loss": 0.1993,
+       "step": 208
+     },
+     {
+       "epoch": 0.83,
+       "learning_rate": 4.055555555555556e-05,
+       "loss": 0.2295,
+       "step": 221
+     },
+     {
+       "epoch": 0.88,
+       "learning_rate": 3.9652777777777776e-05,
+       "loss": 0.2031,
+       "step": 234
+     },
+     {
+       "epoch": 0.93,
+       "learning_rate": 3.875e-05,
+       "loss": 0.2698,
+       "step": 247
+     },
+     {
+       "epoch": 0.97,
+       "learning_rate": 3.7847222222222224e-05,
+       "loss": 0.2311,
+       "step": 260
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9700374531835206,
+       "eval_auc": 0.9962602842183994,
+       "eval_f1": 0.9583333333333333,
+       "eval_loss": 0.08853638172149658,
+       "eval_precision": 0.9533678756476683,
+       "eval_recall": 0.9633507853403142,
+       "eval_runtime": 24.9492,
+       "eval_samples_per_second": 21.403,
+       "eval_steps_per_second": 1.363,
+       "step": 267
+     },
+     {
+       "epoch": 1.02,
+       "learning_rate": 3.694444444444445e-05,
+       "loss": 0.0761,
+       "step": 273
+     },
+     {
+       "epoch": 1.07,
+       "learning_rate": 3.604166666666667e-05,
+       "loss": 0.349,
+       "step": 286
+     },
+     {
+       "epoch": 1.12,
+       "learning_rate": 3.513888888888889e-05,
+       "loss": 0.063,
+       "step": 299
+     },
+     {
+       "epoch": 1.17,
+       "learning_rate": 3.423611111111111e-05,
+       "loss": 0.3246,
+       "step": 312
+     },
+     {
+       "epoch": 1.22,
+       "learning_rate": 3.3333333333333335e-05,
+       "loss": 0.1024,
+       "step": 325
+     },
+     {
+       "epoch": 1.27,
+       "learning_rate": 3.243055555555556e-05,
+       "loss": 0.0925,
+       "step": 338
+     },
+     {
+       "epoch": 1.31,
+       "learning_rate": 3.1527777777777775e-05,
+       "loss": 0.151,
+       "step": 351
+     },
+     {
+       "epoch": 1.36,
+       "learning_rate": 3.069444444444445e-05,
+       "loss": 0.4348,
+       "step": 364
+     },
+     {
+       "epoch": 1.41,
+       "learning_rate": 2.9791666666666668e-05,
+       "loss": 0.1032,
+       "step": 377
+     },
+     {
+       "epoch": 1.46,
+       "learning_rate": 2.8888888888888888e-05,
+       "loss": 0.1198,
+       "step": 390
+     },
+     {
+       "epoch": 1.51,
+       "learning_rate": 2.7986111111111112e-05,
+       "loss": 0.1627,
+       "step": 403
+     },
+     {
+       "epoch": 1.56,
+       "learning_rate": 2.7083333333333332e-05,
+       "loss": 0.1473,
+       "step": 416
+     },
+     {
+       "epoch": 1.61,
+       "learning_rate": 2.618055555555556e-05,
+       "loss": 0.3162,
+       "step": 429
+     },
+     {
+       "epoch": 1.66,
+       "learning_rate": 2.534722222222222e-05,
+       "loss": 0.4072,
+       "step": 442
+     },
+     {
+       "epoch": 1.7,
+       "learning_rate": 2.4444444444444445e-05,
+       "loss": 0.3305,
+       "step": 455
+     },
+     {
+       "epoch": 1.75,
+       "learning_rate": 2.354166666666667e-05,
+       "loss": 0.0342,
+       "step": 468
+     },
+     {
+       "epoch": 1.8,
+       "learning_rate": 2.263888888888889e-05,
+       "loss": 0.1612,
+       "step": 481
+     },
+     {
+       "epoch": 1.85,
+       "learning_rate": 2.1736111111111112e-05,
+       "loss": 0.1608,
+       "step": 494
+     },
+     {
+       "epoch": 1.9,
+       "learning_rate": 2.0833333333333336e-05,
+       "loss": 0.0989,
+       "step": 507
+     },
+     {
+       "epoch": 1.95,
+       "learning_rate": 1.9930555555555556e-05,
+       "loss": 0.017,
+       "step": 520
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 1.9027777777777776e-05,
+       "loss": 0.2095,
+       "step": 533
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.9681647940074907,
+       "eval_auc": 0.9977332743119687,
+       "eval_f1": 0.9541778975741241,
+       "eval_loss": 0.1294296383857727,
+       "eval_precision": 0.9833333333333333,
+       "eval_recall": 0.9267015706806283,
+       "eval_runtime": 25.4048,
+       "eval_samples_per_second": 21.02,
+       "eval_steps_per_second": 1.338,
+       "step": 534
+     },
+     {
+       "epoch": 2.04,
+       "learning_rate": 1.8125e-05,
+       "loss": 0.0592,
+       "step": 546
+     },
+     {
+       "epoch": 2.09,
+       "learning_rate": 1.7222222222222224e-05,
+       "loss": 0.0126,
+       "step": 559
+     },
+     {
+       "epoch": 2.14,
+       "learning_rate": 1.6319444444444444e-05,
+       "loss": 0.0902,
+       "step": 572
+     },
+     {
+       "epoch": 2.19,
+       "learning_rate": 1.5416666666666668e-05,
+       "loss": 0.0089,
+       "step": 585
+     },
+     {
+       "epoch": 2.24,
+       "learning_rate": 1.4513888888888891e-05,
+       "loss": 0.0861,
+       "step": 598
+     },
+     {
+       "epoch": 2.29,
+       "learning_rate": 1.3611111111111111e-05,
+       "loss": 0.0661,
+       "step": 611
+     },
+     {
+       "epoch": 2.34,
+       "learning_rate": 1.2708333333333333e-05,
+       "loss": 0.0656,
+       "step": 624
+     },
+     {
+       "epoch": 2.39,
+       "learning_rate": 1.1805555555555555e-05,
+       "loss": 0.2022,
+       "step": 637
+     },
+     {
+       "epoch": 2.43,
+       "learning_rate": 1.0902777777777779e-05,
+       "loss": 0.0538,
+       "step": 650
+     },
+     {
+       "epoch": 2.48,
+       "learning_rate": 1e-05,
+       "loss": 0.0116,
+       "step": 663
+     },
+     {
+       "epoch": 2.53,
+       "learning_rate": 9.097222222222223e-06,
+       "loss": 0.0531,
+       "step": 676
+     },
+     {
+       "epoch": 2.58,
+       "learning_rate": 8.194444444444445e-06,
+       "loss": 0.2677,
+       "step": 689
+     },
+     {
+       "epoch": 2.63,
+       "learning_rate": 7.2916666666666674e-06,
+       "loss": 0.096,
+       "step": 702
+     },
+     {
+       "epoch": 2.68,
+       "learning_rate": 6.3888888888888885e-06,
+       "loss": 0.1554,
+       "step": 715
+     },
+     {
+       "epoch": 2.73,
+       "learning_rate": 5.486111111111111e-06,
+       "loss": 0.1553,
+       "step": 728
+     },
+     {
+       "epoch": 2.78,
+       "learning_rate": 4.583333333333333e-06,
+       "loss": 0.0536,
+       "step": 741
+     },
+     {
+       "epoch": 2.82,
+       "learning_rate": 3.680555555555556e-06,
+       "loss": 0.2112,
+       "step": 754
+     },
+     {
+       "epoch": 2.87,
+       "learning_rate": 2.777777777777778e-06,
+       "loss": 0.0042,
+       "step": 767
+     },
+     {
+       "epoch": 2.92,
+       "learning_rate": 1.875e-06,
+       "loss": 0.0882,
+       "step": 780
+     },
+     {
+       "epoch": 2.97,
+       "learning_rate": 9.722222222222222e-07,
+       "loss": 0.0295,
+       "step": 793
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.9812734082397003,
+       "eval_auc": 0.9980461893059392,
+       "eval_f1": 0.9732620320855615,
+       "eval_loss": 0.08717025071382523,
+       "eval_precision": 0.994535519125683,
+       "eval_recall": 0.9528795811518325,
+       "eval_runtime": 24.8605,
+       "eval_samples_per_second": 21.48,
+       "eval_steps_per_second": 1.368,
+       "step": 801
+     }
+   ],
+   "logging_steps": 13,
+   "max_steps": 801,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 5.010981737968927e+17,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
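As a quick cross-check of the trainer state above (illustrative, not part of the commit), the best epoch's F1 follows directly from its precision and recall:

```python
# F1 is the harmonic mean of precision and recall.
precision = 0.994535519125683
recall = 0.9528795811518325
f1 = 2 * precision * recall / (precision + recall)
print(f1)  # ~0.973262, matching the logged eval_f1 of 0.9732620320855615
```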
checkpoint-801/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c3b96869fa178d23b082e5a6d9bf48ec5865f979c6391b5ae6571ff0f37b681
+ size 4728
config.json ADDED
@@ -0,0 +1,64 @@
+ {
+   "_name_or_path": "umm-maybe/AI-image-detector",
+   "_num_labels": 2,
+   "architectures": [
+     "SwinForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "depths": [
+     2,
+     2,
+     18,
+     2
+   ],
+   "drop_path_rate": 0.1,
+   "embed_dim": 128,
+   "encoder_stride": 32,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "artificial",
+     "1": "human"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "label2id": {
+     "artificial": 0,
+     "human": 1
+   },
+   "layer_norm_eps": 1e-05,
+   "max_length": 128,
+   "mlp_ratio": 4.0,
+   "model_type": "swin",
+   "num_channels": 3,
+   "num_heads": [
+     4,
+     8,
+     16,
+     32
+   ],
+   "num_layers": 4,
+   "out_features": [
+     "stage4"
+   ],
+   "out_indices": [
+     4
+   ],
+   "padding": "max_length",
+   "patch_size": 4,
+   "path_norm": true,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "stage_names": [
+     "stem",
+     "stage1",
+     "stage2",
+     "stage3",
+     "stage4"
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.1",
+   "use_absolute_embeddings": false,
+   "window_size": 7
+ }
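A small sanity-check sketch for this config (an addition; the repo id `Colby/sdxl-detection` is taken from training_params.json). Swin doubles its channel width at each of the four stages, so `hidden_size` should equal `embed_dim * 2**(len(depths) - 1)`:

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("Colby/sdxl-detection")
# 128 * 2**3 == 1024, consistent with embed_dim and hidden_size above.
assert cfg.hidden_size == cfg.embed_dim * 2 ** (len(cfg.depths) - 1)
print(cfg.id2label)  # {0: 'artificial', 1: 'human'}
```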
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8809f67fbbf4b455b13aaebf2bfc90192e7c6c881fd8efce0eb51fba9a06e5c
+ size 347498816
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
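A rough numpy equivalent of what this processor does (a sketch under the settings above; `example.jpg` is a placeholder): resize to 224x224 with bicubic resampling (`resample: 3`), rescale by 1/255 (`rescale_factor` = 0.00392156862745098), then normalize with the ImageNet mean/std.

```python
import numpy as np
from PIL import Image

img = Image.open("example.jpg").convert("RGB").resize((224, 224), Image.BICUBIC)
x = np.asarray(img, dtype=np.float32) * (1.0 / 255.0)    # do_rescale
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)  # image_mean
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)   # image_std
x = (x - mean) / std                                      # do_normalize
x = x.transpose(2, 0, 1)[np.newaxis]                      # HWC -> NCHW batch
```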
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c3b96869fa178d23b082e5a6d9bf48ec5865f979c6391b5ae6571ff0f37b681
+ size 4728
training_params.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "data_path": "Colby/autotrain-data-sdxl-detection",
+   "model": "umm-maybe/AI-image-detector",
+   "username": "Colby",
+   "lr": 5e-05,
+   "epochs": 3,
+   "batch_size": 8,
+   "warmup_ratio": 0.1,
+   "gradient_accumulation": 1,
+   "optimizer": "adamw_torch",
+   "scheduler": "linear",
+   "weight_decay": 0.0,
+   "max_grad_norm": 1.0,
+   "seed": 42,
+   "train_split": "train",
+   "valid_split": "validation",
+   "logging_steps": -1,
+   "project_name": "/tmp/model",
+   "auto_find_batch_size": false,
+   "mixed_precision": "fp16",
+   "save_total_limit": 1,
+   "save_strategy": "epoch",
+   "push_to_hub": true,
+   "repo_id": "Colby/sdxl-detection",
+   "evaluation_strategy": "epoch",
+   "image_column": "autotrain_image",
+   "target_column": "autotrain_label",
+   "log": "none"
+ }
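Finally, a hedged sketch of the learning-rate schedule these parameters imply: linear warmup over roughly `warmup_ratio * max_steps` steps, then linear decay to zero. The exact warmup step count and logging offsets are assumptions, so the values logged in trainer_state.json may differ slightly from this reconstruction.

```python
import torch
from transformers import get_linear_schedule_with_warmup

max_steps = 801                      # global_step from trainer_state.json
warmup_steps = int(0.1 * max_steps)  # "warmup_ratio": 0.1 -> ~80 steps
param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.AdamW([param], lr=5e-05)  # "optimizer": "adamw_torch", "lr": 5e-05
scheduler = get_linear_schedule_with_warmup(optimizer, warmup_steps, max_steps)

for step in range(1, max_steps + 1):
    optimizer.step()
    scheduler.step()
    if step in (80, 400, 801):
        # Peak ~5e-05 at the end of warmup, decaying linearly to 0 at step 801.
        print(step, scheduler.get_last_lr()[0])
```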