wandb_version: 1

_wandb:
  desc: null
  value:
    python_version: 3.10.13
    cli_version: 0.16.6
    framework: huggingface
    huggingface_version: 4.41.0
    is_jupyter_run: true
    is_kaggle_kernel: true
    start_time: 1716377654.0
    t:
      1:
      - 1
      - 2
      - 3
      - 5
      - 11
      - 12
      - 49
      - 51
      - 53
      - 55
      - 71
      - 98
      - 105
      2:
      - 1
      - 2
      - 3
      - 5
      - 11
      - 12
      - 49
      - 51
      - 53
      - 55
      - 71
      - 98
      - 105
      3:
      - 7
      - 13
      - 19
      - 23
      - 62
      4: 3.10.13
      5: 0.16.6
      6: 4.41.0
      8:
      - 1
      - 2
      - 5
      9:
        1: transformers_trainer
      13: linux-x86_64
    m:
    - 1: train/global_step
      6:
      - 3
    - 1: train/loss
      5: 1
      6:
      - 1
    - 1: train/grad_norm
      5: 1
      6:
      - 1
    - 1: train/learning_rate
      5: 1
      6:
      - 1
    - 1: train/epoch
      5: 1
      6:
      - 1
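# Note: the `_wandb` block above is metadata written by the wandb client itself
# (Python/CLI/transformers versions, Jupyter/Kaggle flags, feature-usage telemetry
# under `t`, and metric definitions under `m` that tie the train/* series to
# train/global_step). It is not user-supplied configuration.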
peft_config:
  desc: null
  value:
    default:
      peft_type: LORA
      auto_mapping: null
      base_model_name_or_path: core42/jais-13b
      revision: null
      task_type: CAUSAL_LM
      inference_mode: false
      r: 16
      target_modules:
      - c_attn
      lora_alpha: 32
      lora_dropout: 0.05
      fan_in_fan_out: false
      bias: none
      use_rslora: false
      modules_to_save: null
      init_lora_weights: true
      layers_to_transform: null
      layers_pattern: null
      rank_pattern: {}
      alpha_pattern: {}
      megatron_config: null
      megatron_core: megatron.core
      loftq_config: {}
      use_dora: false
      layer_replication: null
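# The `default` adapter above is a standard PEFT LoRA configuration targeting the
# attention projection `c_attn`. A minimal Python sketch that would produce an
# equivalent adapter config (variable names and the get_peft_model call are
# assumptions, not taken from this file):
#
#   from peft import LoraConfig, get_peft_model
#
#   lora_config = LoraConfig(
#       r=16,
#       lora_alpha=32,
#       lora_dropout=0.05,
#       target_modules=["c_attn"],
#       bias="none",
#       task_type="CAUSAL_LM",
#   )
#   # model = get_peft_model(base_model, lora_config)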
vocab_size:
  desc: null
  value: 84992
n_positions:
  desc: null
  value: 2048
n_embd:
  desc: null
  value: 5120
n_layer:
  desc: null
  value: 40
n_head:
  desc: null
  value: 40
n_inner:
  desc: null
  value: 13653
activation_function:
  desc: null
  value: swiglu
resid_pdrop:
  desc: null
  value: 0.0
embd_pdrop:
  desc: null
  value: 0.0
attn_pdrop:
  desc: null
  value: 0.0
layer_norm_epsilon:
  desc: null
  value: 1.0e-05
initializer_range:
  desc: null
  value: 0.02
scale_attn_weights:
  desc: null
  value: true
use_cache:
  desc: null
  value: false
scale_attn_by_inverse_layer_idx:
  desc: null
  value: false
reorder_and_upcast_attn:
  desc: null
  value: false
bos_token_id:
  desc: null
  value: 0
eos_token_id:
  desc: null
  value: 0
position_embedding_type:
  desc: null
  value: alibi
width_scale:
  desc: null
  value: 0.11100000000000002
embeddings_scale:
  desc: null
  value: 14.6
scale_qk_dot_by_d:
  desc: null
  value: true
return_dict:
  desc: null
  value: true
output_hidden_states:
  desc: null
  value: false
output_attentions:
  desc: null
  value: false
torchscript:
  desc: null
  value: false
torch_dtype:
  desc: null
  value: float32
use_bfloat16:
  desc: null
  value: false
tf_legacy_loss:
  desc: null
  value: false
pruned_heads:
  desc: null
  value: {}
tie_word_embeddings:
  desc: null
  value: true
chunk_size_feed_forward:
  desc: null
  value: 0
is_encoder_decoder:
  desc: null
  value: false
is_decoder:
  desc: null
  value: false
cross_attention_hidden_size:
  desc: null
  value: null
add_cross_attention:
  desc: null
  value: false
tie_encoder_decoder:
  desc: null
  value: false
max_length:
  desc: null
  value: 20
min_length:
  desc: null
  value: 0
do_sample:
  desc: null
  value: false
early_stopping:
  desc: null
  value: false
num_beams:
  desc: null
  value: 1
num_beam_groups:
  desc: null
  value: 1
diversity_penalty:
  desc: null
  value: 0.0
temperature:
  desc: null
  value: 1.0
top_k:
  desc: null
  value: 50
top_p:
  desc: null
  value: 1.0
typical_p:
  desc: null
  value: 1.0
repetition_penalty:
  desc: null
  value: 1.0
length_penalty:
  desc: null
  value: 1.0
no_repeat_ngram_size:
  desc: null
  value: 0
encoder_no_repeat_ngram_size:
  desc: null
  value: 0
bad_words_ids:
  desc: null
  value: null
num_return_sequences:
  desc: null
  value: 1
output_scores:
  desc: null
  value: false
return_dict_in_generate:
  desc: null
  value: false
forced_bos_token_id:
  desc: null
  value: null
forced_eos_token_id:
  desc: null
  value: null
remove_invalid_values:
  desc: null
  value: false
exponential_decay_length_penalty:
  desc: null
  value: null
suppress_tokens:
  desc: null
  value: null
begin_suppress_tokens:
  desc: null
  value: null
architectures:
  desc: null
  value:
  - JAISLMHeadModel
finetuning_task:
  desc: null
  value: null
id2label:
  desc: null
  value:
    '0': LABEL_0
    '1': LABEL_1
label2id:
  desc: null
  value:
    LABEL_0: 0
    LABEL_1: 1
tokenizer_class:
  desc: null
  value: null
prefix:
  desc: null
  value: null
pad_token_id:
  desc: null
  value: 0
sep_token_id:
  desc: null
  value: null
decoder_start_token_id:
  desc: null
  value: null
task_specific_params:
  desc: null
  value: null
problem_type:
  desc: null
  value: null
_name_or_path:
  desc: null
  value: core42/jais-13b
transformers_version:
  desc: null
  value: 4.41.0
auto_map:
  desc: null
  value:
    AutoConfig: core42/jais-13b--configuration_jais.JAISConfig
    AutoModel: core42/jais-13b--modeling_jais.JAISModel
    AutoModelForCausalLM: core42/jais-13b--modeling_jais.JAISLMHeadModel
    AutoModelForQuestionAnswering: core42/jais-13b--modeling_jais.JAISForQuestionAnswering
    AutoModelForSequenceClassification: core42/jais-13b--modeling_jais.JAISForSequenceClassification
    AutoModelForTokenClassification: core42/jais-13b--modeling_jais.JAISForTokenClassification
model_type:
  desc: null
  value: jais
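# The keys from vocab_size through model_type above mirror the JAISConfig of
# core42/jais-13b (40 layers, hidden size 5120, 40 heads, SwiGLU activation,
# ALiBi position embeddings), loaded via the custom code referenced in auto_map.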
quantization_config:
  desc: null
  value:
    quant_method: QuantizationMethod.BITS_AND_BYTES
    _load_in_8bit: false
    _load_in_4bit: true
    llm_int8_threshold: 6.0
    llm_int8_skip_modules: null
    llm_int8_enable_fp32_cpu_offload: false
    llm_int8_has_fp16_weight: false
    bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant: false
    bnb_4bit_compute_dtype: bfloat16
    bnb_4bit_quant_storage: uint8
    load_in_4bit: true
    load_in_8bit: false
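# quantization_config above corresponds to a bitsandbytes 4-bit NF4 load (the usual
# QLoRA-style setup). A hedged sketch of how the base model could be loaded to match
# these values; the exact call is not recorded in this file and device_map is an
# assumption:
#
#   import torch
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   bnb_config = BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_use_double_quant=False,
#       bnb_4bit_compute_dtype=torch.bfloat16,
#   )
#   model = AutoModelForCausalLM.from_pretrained(
#       "core42/jais-13b",
#       quantization_config=bnb_config,
#       trust_remote_code=True,  # custom JAIS modeling code, see auto_map above
#       device_map="auto",       # assumption
#   )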
output_dir:
  desc: null
  value: /kaggle/working/
overwrite_output_dir:
  desc: null
  value: false
do_train:
  desc: null
  value: false
do_eval:
  desc: null
  value: false
do_predict:
  desc: null
  value: false
eval_strategy:
  desc: null
  value: 'no'
prediction_loss_only:
  desc: null
  value: false
per_device_train_batch_size:
  desc: null
  value: 8
per_device_eval_batch_size:
  desc: null
  value: 8
per_gpu_train_batch_size:
  desc: null
  value: null
per_gpu_eval_batch_size:
  desc: null
  value: null
gradient_accumulation_steps:
  desc: null
  value: 1
eval_accumulation_steps:
  desc: null
  value: null
eval_delay:
  desc: null
  value: 0
learning_rate:
  desc: null
  value: 0.0002
weight_decay:
  desc: null
  value: 0.0
adam_beta1:
  desc: null
  value: 0.9
adam_beta2:
  desc: null
  value: 0.999
adam_epsilon:
  desc: null
  value: 1.0e-08
max_grad_norm:
  desc: null
  value: 1.0
num_train_epochs:
  desc: null
  value: 2
max_steps:
  desc: null
  value: -1
lr_scheduler_type:
  desc: null
  value: linear
lr_scheduler_kwargs:
  desc: null
  value: {}
warmup_ratio:
  desc: null
  value: 0.0
warmup_steps:
  desc: null
  value: 0
log_level:
  desc: null
  value: passive
log_level_replica:
  desc: null
  value: warning
log_on_each_node:
  desc: null
  value: true
logging_dir:
  desc: null
  value: /kaggle/working/runs/May22_11-33-56_2c1b614ec68f
logging_strategy:
  desc: null
  value: steps
logging_first_step:
  desc: null
  value: false
logging_steps:
  desc: null
  value: 10
logging_nan_inf_filter:
  desc: null
  value: true
save_strategy:
  desc: null
  value: epoch
save_steps:
  desc: null
  value: 500
save_total_limit:
  desc: null
  value: 4
save_safetensors:
  desc: null
  value: true
save_on_each_node:
  desc: null
  value: false
save_only_model:
  desc: null
  value: false
restore_callback_states_from_checkpoint:
  desc: null
  value: false
no_cuda:
  desc: null
  value: false
use_cpu:
  desc: null
  value: false
use_mps_device:
  desc: null
  value: false
seed:
  desc: null
  value: 42
data_seed:
  desc: null
  value: null
jit_mode_eval:
  desc: null
  value: false
use_ipex:
  desc: null
  value: false
bf16:
  desc: null
  value: true
fp16:
  desc: null
  value: false
fp16_opt_level:
  desc: null
  value: O1
half_precision_backend:
  desc: null
  value: auto
bf16_full_eval:
  desc: null
  value: false
fp16_full_eval:
  desc: null
  value: false
tf32:
  desc: null
  value: null
local_rank:
  desc: null
  value: 0
ddp_backend:
  desc: null
  value: null
tpu_num_cores:
  desc: null
  value: null
tpu_metrics_debug:
  desc: null
  value: false
debug:
  desc: null
  value: []
dataloader_drop_last:
  desc: null
  value: false
eval_steps:
  desc: null
  value: null
dataloader_num_workers:
  desc: null
  value: 0
dataloader_prefetch_factor:
  desc: null
  value: null
past_index:
  desc: null
  value: -1
run_name:
  desc: null
  value: /kaggle/working/
disable_tqdm:
  desc: null
  value: false
remove_unused_columns:
  desc: null
  value: true
label_names:
  desc: null
  value: null
load_best_model_at_end:
  desc: null
  value: false
metric_for_best_model:
  desc: null
  value: null
greater_is_better:
  desc: null
  value: null
ignore_data_skip:
  desc: null
  value: false
fsdp:
  desc: null
  value: []
fsdp_min_num_params:
  desc: null
  value: 0
fsdp_config:
  desc: null
  value:
    min_num_params: 0
    xla: false
    xla_fsdp_v2: false
    xla_fsdp_grad_ckpt: false
fsdp_transformer_layer_cls_to_wrap:
  desc: null
  value: null
accelerator_config:
  desc: null
  value:
    split_batches: false
    dispatch_batches: null
    even_batches: true
    use_seedable_sampler: true
    non_blocking: false
    gradient_accumulation_kwargs: null
deepspeed:
  desc: null
  value: null
label_smoothing_factor:
  desc: null
  value: 0.0
optim:
  desc: null
  value: adamw_torch
optim_args:
  desc: null
  value: null
adafactor:
  desc: null
  value: false
group_by_length:
  desc: null
  value: false
length_column_name:
  desc: null
  value: length
report_to:
  desc: null
  value:
  - tensorboard
  - wandb
ddp_find_unused_parameters:
  desc: null
  value: null
ddp_bucket_cap_mb:
  desc: null
  value: null
ddp_broadcast_buffers:
  desc: null
  value: null
dataloader_pin_memory:
  desc: null
  value: true
dataloader_persistent_workers:
  desc: null
  value: false
skip_memory_metrics:
  desc: null
  value: true
use_legacy_prediction_loop:
  desc: null
  value: false
push_to_hub:
  desc: null
  value: false
resume_from_checkpoint:
  desc: null
  value: null
hub_model_id:
  desc: null
  value: null
hub_strategy:
  desc: null
  value: every_save
hub_token:
  desc: null
  value: <HUB_TOKEN>
hub_private_repo:
  desc: null
  value: false
hub_always_push:
  desc: null
  value: false
gradient_checkpointing:
  desc: null
  value: false
gradient_checkpointing_kwargs:
  desc: null
  value: null
include_inputs_for_metrics:
  desc: null
  value: false
eval_do_concat_batches:
  desc: null
  value: true
fp16_backend:
  desc: null
  value: auto
evaluation_strategy:
  desc: null
  value: null
push_to_hub_model_id:
  desc: null
  value: null
push_to_hub_organization:
  desc: null
  value: null
push_to_hub_token:
  desc: null
  value: <PUSH_TO_HUB_TOKEN>
mp_parameters:
  desc: null
  value: ''
auto_find_batch_size:
  desc: null
  value: true
full_determinism:
  desc: null
  value: false
torchdynamo:
  desc: null
  value: null
ray_scope:
  desc: null
  value: last
ddp_timeout:
  desc: null
  value: 1800
torch_compile:
  desc: null
  value: false
torch_compile_backend:
  desc: null
  value: null
torch_compile_mode:
  desc: null
  value: null
dispatch_batches:
  desc: null
  value: null
split_batches:
  desc: null
  value: null
include_tokens_per_second:
  desc: null
  value: false
include_num_input_tokens_seen:
  desc: null
  value: false
neftune_noise_alpha:
  desc: null
  value: null
optim_target_modules:
  desc: null
  value: null
batch_eval_metrics:
  desc: null
  value: false
model/num_parameters:
  desc: null
  value: 13033919160
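# The keys from output_dir through batch_eval_metrics mirror transformers
# TrainingArguments; model/num_parameters is logged by the Trainer. A minimal
# sketch reproducing the main training values recorded above (an approximation,
# not the original notebook code):
#
#   from transformers import TrainingArguments
#
#   args = TrainingArguments(
#       output_dir="/kaggle/working/",
#       per_device_train_batch_size=8,
#       learning_rate=2e-4,
#       num_train_epochs=2,
#       bf16=True,
#       logging_steps=10,
#       save_strategy="epoch",
#       save_total_limit=4,
#       auto_find_batch_size=True,
#       report_to=["tensorboard", "wandb"],
#   )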