kejian committed
Commit f9f2f06
Parent(s): 5dd71e2

update model card README.md

Files changed (1):
  1. README.md +8 -7
README.md CHANGED
@@ -36,10 +36,10 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 0.0005
- - train_batch_size: 32
- - eval_batch_size: 16
+ - train_batch_size: 64
+ - eval_batch_size: 32
 - seed: 42
- - gradient_accumulation_steps: 4
+ - gradient_accumulation_steps: 2
 - total_train_batch_size: 128
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
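
The batch-size edit above is self-consistent: doubling the per-device batch while halving gradient accumulation leaves the effective batch untouched. A quick sanity check (a single device is assumed; the variable names are illustrative, not from the training code):

```python
# Effective batch = per-device train batch * gradient accumulation steps
# (times the device count; a single device is assumed in this check).
old_effective = 32 * 4  # before: train_batch_size=32, gradient_accumulation_steps=4
new_effective = 64 * 2  # after:  train_batch_size=64, gradient_accumulation_steps=2
assert old_effective == new_effective == 128  # matches total_train_batch_size: 128
```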
@@ -60,7 +60,7 @@ The following hyperparameters were used during training:
 'is_split_by_sentences': True,
 'skip_tokens': 1649999872},
 'generation': {'batch_size': 128,
- 'every_n_steps': 256,
+ 'every_n_steps': 512,
 'force_call_on': [12588],
 'metrics_configs': [{}, {'n': 1}, {}],
 'scenario_configs': [{'display_as_html': True,
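
Both periodic callbacks in this config ('generation' above, 'kl_gpt3_callback' below) move from every 256 to every 512 steps while keeping the forced call at step 12588. A minimal sketch of the gating such settings usually imply; the helper is hypothetical, not taken from this codebase:

```python
def should_fire(step: int, every_n_steps: int, force_call_on: list[int]) -> bool:
    """Run a callback on a fixed cadence, plus at any explicitly forced steps."""
    return step % every_n_steps == 0 or step in force_call_on

# With every_n_steps=512 the callback fires half as often as before,
# but force_call_on=[12588] still guarantees a call at that step.
```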
@@ -88,7 +88,7 @@ The following hyperparameters were used during training:
 'prompts_path': 'resources/functions_csnet.jsonl',
 'use_prompt_for_scoring': True}],
 'scorer_config': {}},
- 'kl_gpt3_callback': {'every_n_steps': 256,
+ 'kl_gpt3_callback': {'every_n_steps': 512,
 'force_call_on': [12588],
 'gpt3_kwargs': {'model_name': 'code-cushman-001'},
 'max_tokens': 64,
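
The 'kl_gpt3_callback' evidently scores the trained model against code-cushman-001; a natural reading is a Monte Carlo estimate of KL(model || reference) over sampled continuations. A sketch under that assumption (the sampling and log-probability callables are hypothetical placeholders):

```python
def estimate_kl(samples: list[str], logp_model, logp_ref) -> float:
    """KL(model || ref) ~ mean over x ~ model of log p_model(x) - log p_ref(x).

    `samples` are sequences drawn from the trained model; `logp_model` and
    `logp_ref` return total sequence log-probabilities under each model.
    """
    diffs = [logp_model(x) - logp_ref(x) for x in samples]
    return sum(diffs) / len(diffs)
```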
@@ -96,7 +96,8 @@ The following hyperparameters were used during training:
 'model': {'from_scratch': False,
 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True,
 'scale_attn_by': True},
- 'model_kwargs': {'revision': 'c38e2b6acf17781918d39a310ee1adc4674a8225'},
+ 'model_kwargs': {'revision': 'c38e2b6acf17781918d39a310ee1adc4674a8225',
+ 'value_head_config': {'is_detached': False}},
 'path_or_name': 'kejian/mighty-rwr'},
 'objective': {'alpha': 1, 'beta': 10, 'name': 'AWR'},
 'tokenizer': {'path_or_name': 'codeparrot/codeparrot-small'},
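
The 'objective' entry names AWR (advantage-weighted regression), which weights the token log-likelihood by an exponentiated advantage; the new 'value_head_config' line suggests those advantages come from a value head trained jointly with the LM ('is_detached': False). Exactly how 'alpha' and 'beta' enter the weighting in this codebase is an assumption; a PyTorch-style sketch of the common form:

```python
import torch

def awr_loss(logprobs: torch.Tensor, advantages: torch.Tensor,
             alpha: float = 1.0, beta: float = 10.0) -> torch.Tensor:
    """Advantage-weighted regression, sketched as
    loss = -alpha * mean(exp(A / beta) * log pi(a | s)).
    Whether this codebase divides or multiplies the advantage by beta
    is not confirmed; the exp(A / beta) form follows the original AWR setup.
    """
    weights = torch.exp(advantages / beta).detach()  # no gradient through weights
    return -(alpha * weights * logprobs).mean()
```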
@@ -122,4 +123,4 @@ The following hyperparameters were used during training:
 'weight_decay': 0.1}}
 
 # Wandb URL:
- https://wandb.ai/kejian/uncategorized/runs/1wuapzsu
+ https://wandb.ai/kejian/uncategorized/runs/39mf4btg