3v324v23 committed on
Commit
d07208b
1 Parent(s): 6c2c09e
Files changed (2)
  1. config.yaml +66 -0
  2. lol.pth +3 -0
config.yaml ADDED
@@ -0,0 +1,66 @@
+ global:
+   name: train-abinet-wo-iter
+   phase: train
+   stage: train-super
+   workdir: workdir
+   seed: ~
+
+ dataset:
+   train: {
+     roots: [
+       'output_tbell_dataset2/',
+       # 'data/training/MJ/MJ_train/',
+       # 'data/training/MJ/MJ_test/',
+       # 'data/training/MJ/MJ_valid/',
+       # 'data/training/ST'
+     ],
+     batch_size: 60
+   }
+   test: {
+     roots: ['output_tbell_dataset2/'],
+     batch_size: 60
+   }
+   data_aug: True
+   multiscales: False
+   num_workers: 60
+
+ training:
+   epochs: 1000000
+   show_iters: 50
+   eval_iters: 250
+   # save_iters: 200
+
+ optimizer:
+   type: Adam
+   true_wd: False
+   wd: 0.0
+   bn_wd: False
+   clip_grad: 20
+   lr: 0.0001
+   args: {
+     betas: !!python/tuple [0.9, 0.999], # for default Adam
+   }
+   scheduler: {
+     periods: [6, 4],
+     gamma: 0.1,
+   }
+
+ model:
+   name: 'modules.model_abinet.ABINetModel'
+   vision: {
+     checkpoint: workdir/pretrain-vision-model/best-pretrain-vision-model.pth,
+     loss_weight: 1.,
+     attention: 'position',
+     backbone: 'transformer',
+     backbone_ln: 3,
+   }
+   language: {
+     checkpoint: workdir/pretrain-language-model/pretrain-language-model.pth,
+     num_layers: 4,
+     loss_weight: 1.,
+     detach: True,
+     use_self_attn: False
+   }
+   alignment: {
+     loss_weight: 1.,
+   }
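
The file above is an ABINet-style training config. As a rough, non-authoritative sketch of how it could be read in Python (assuming PyYAML; the load_config helper and the relative path config.yaml are illustrative, and the !!python/tuple tag used for the Adam betas needs a loader beyond yaml.safe_load):

import yaml

# Rough sketch: read the training config added in this commit.
# The !!python/tuple tag (used for the Adam betas) is not handled by
# yaml.safe_load; yaml.FullLoader (or yaml.unsafe_load) accepts it.
# The function name and relative path are illustrative.
def load_config(path="config.yaml"):
    with open(path, "r") as f:
        return yaml.load(f, Loader=yaml.FullLoader)

if __name__ == "__main__":
    cfg = load_config()
    print(cfg["global"]["name"])             # train-abinet-wo-iter
    print(cfg["dataset"]["train"]["roots"])  # ['output_tbell_dataset2/']
    print(cfg["optimizer"]["args"]["betas"]) # (0.9, 0.999)
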
lol.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f186283b1fae645578562fe233311413e3a4607903dded8c012976e82ac74c35
+ size 442033107
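
lol.pth is committed as a Git LFS pointer rather than the weights themselves: oid is the SHA-256 of the real file and size is its byte count. A minimal sketch of checking a locally fetched copy against the pointer (file name taken from the pointer, helper name illustrative):

import hashlib
import os

# Rough sketch: verify a fetched lol.pth against the LFS pointer above.
# EXPECTED_OID and EXPECTED_SIZE are copied verbatim from the pointer;
# the helper name is illustrative.
EXPECTED_OID = "f186283b1fae645578562fe233311413e3a4607903dded8c012976e82ac74c35"
EXPECTED_SIZE = 442033107

def verify_lfs_file(path="lol.pth"):
    assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
    print("lol.pth matches its LFS pointer")

if __name__ == "__main__":
    verify_lfs_file()
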