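# Configuration for the Clip-hash / CISEN classification experiment on LuojiaHOG.
# Hypothetical launch command (entry-point and config path assumed, not taken from this repo):
#   python train.py --config config/classification.yaml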
DATA:
  dataset: classification
  dataset_json_file: /data02/xy/dataEngine/json_data/LuojiaHOG(test)_.json
  # dataset_json_file: /data02/xy/dataEngine/json_data/merged_output_combined_9w_resplit.json
  exp_name: classifi
  ratio: 0.9
  dataset_train_split: 0.6
  dataset_query_split: 0.2
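  # Split arithmetic: 0.6 train + 0.2 query leaves 0.2, which presumably serves as
  # the retrieval database split; `ratio` likely subsamples the dataset beforehand.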
  imgs_folder: /data02/xy/Clip-hash/datasets/image/
  label_path: /data02/xy/Clip-hash/labels.txt
  num_classes: 10
  # num_classes: 131
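  # 10 matches the LuojiaHOG(test) subset selected above; the commented-out 131
  # presumably pairs with the merged_output_combined_9w_resplit dataset.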
TRAIN:
  # Base Arch
  # clip_pretrain: /data02/xy/Clip-hash/pretrain/RS5M_ViT-B-32.pt
  clip_pretrain: ./cisen/pretrain/RS5M_ViT-B-32.pt
  model_name: ViT-B-32 
  ckpt_path: /data02/xy/GeoRSCLIP/codebase/inference/pretrain/RS5M_ViT-B-32.pt
  input_size: 224
  word_len: 328
  word_dim: 1024
  vis_dim: 512
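  # word_len presumably caps the tokenized caption length; word_dim and vis_dim are
  # the text and visual feature widths consumed by the fusion modules below.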
  fpn_in: [ 512, 768, 768 ]
  fpn_out: [ 768, 768, 768, 512 ]
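  # fpn_in/fpn_out give the channel widths entering and leaving the FPN-style fusion;
  # 768 presumably matches ViT-B/32 patch tokens and 512 the projected CLIP embedding.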
  sync_bn: True
  # Decoder
  num_layers: 3
  num_head: 8
  dim_ffn: 2048
  dropout: 0.1
  intermediate: False
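  # intermediate: False presumably keeps only the final decoder layer's output
  # instead of returning all num_layers intermediate outputs.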
  # Training Setting
  workers: 32  # data loader workers
  workers_val: 16
  epochs: 50
  milestones: [50]
  start_epoch: 0
  batch_size: 256 # batch size for training
  batch_size_val: 256 # batch size for validation during training; memory/speed tradeoff
  base_lr: 0.0001
  min_lr: 0.00000001
  lr_decay: 0.5
  lr_multi: 0.1
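  # Assuming a standard MultiStepLR-style schedule, the milestone at epoch 50 never
  # fires within the 50 training epochs, so base_lr stays at 1e-4; lr_multi presumably
  # scales the learning rate of a parameter subgroup (e.g. the pretrained backbone).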
  weight_decay: 0.
  max_norm: 0.
  manual_seed: 0
  print_freq: 1
  lamda1: 0.5
  lamda2: 0.5
  beta1: 0.5
  beta2: 0.5
  eta: 0.2
  warmup_epochs: 0
  contrastive: [0.4, 0.3, 0.3]
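  # lamda1/lamda2, beta1/beta2 and eta presumably weight the individual loss terms;
  # the contrastive list weights three contrastive objectives and sums to 1.0.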
  # Resume & Save
  output_folder: /data02/xy/Clip-hash/exp/
  save_freq: 1
  weight:  # path to initial weight (default: none)
  resume: False # path to latest checkpoint to resume from, or False to start fresh (default: none)
  evaluate: True  # evaluate on validation set; needs extra GPU memory, so a small batch_size_val is recommended
Distributed:
  dist_url: tcp://localhost:3693
  dist_backend: 'nccl'
  multiprocessing_distributed: True
  world_size: 1
  rank: 0
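  # Single-node setup: world_size 1, rank 0, NCCL backend, one process per GPU
  # spawned via multiprocessing_distributed (the standard PyTorch DDP pattern).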
TEST:
  test_split: val-test
  gpu: [0]
  test_lmdb: /data02/xy/Clip-hash/datasets/lmdb/refcoco/val.lmdb
  visualize: False
  topk: 5
  test_batch_size: 256
  val_batch_size: 1
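  # test_batch_size batches the val-test split; val_batch_size: 1 presumably runs
  # per-sample evaluation (e.g. when visualize is enabled).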