include llama models
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C1/.hydra/config.yaml +37 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C1/.hydra/hydra.yaml +155 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C1/.hydra/overrides.yaml +1 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C1/Llama-3.1-8B-llama31_classification_lora-C1_inference_results.jsonl +0 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C1/evaluation_results.csv +2 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C1/run_inference_experiment.log +127 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C2/.hydra/config.yaml +37 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C2/.hydra/hydra.yaml +155 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C2/.hydra/overrides.yaml +1 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C2/Llama-3.1-8B-llama31_classification_lora-C2_inference_results.jsonl +0 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C2/evaluation_results.csv +2 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C2/run_inference_experiment.log +127 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C3/.hydra/config.yaml +37 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C3/.hydra/hydra.yaml +155 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C3/.hydra/overrides.yaml +1 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C3/Llama-3.1-8B-llama31_classification_lora-C3_inference_results.jsonl +0 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C3/evaluation_results.csv +2 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C3/run_inference_experiment.log +127 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C4/.hydra/config.yaml +37 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C4/.hydra/hydra.yaml +155 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C4/.hydra/overrides.yaml +1 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C4/Llama-3.1-8B-llama31_classification_lora-C4_inference_results.jsonl +0 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C4/evaluation_results.csv +2 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C4/run_inference_experiment.log +127 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C5/.hydra/config.yaml +37 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C5/.hydra/hydra.yaml +155 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C5/.hydra/overrides.yaml +1 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C5/Llama-3.1-8B-llama31_classification_lora-C5_inference_results.jsonl +0 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C5/evaluation_results.csv +2 -0
- runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C5/run_inference_experiment.log +127 -0
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C1/.hydra/config.yaml ADDED
@@ -0,0 +1,37 @@
+cache_dir: /tmp/
+dataset:
+  name: kamel-usp/aes_enem_dataset
+  split: JBCS2025
+training_params:
+  seed: 42
+  num_train_epochs: 20
+  logging_steps: 100
+  metric_for_best_model: QWK
+  bf16: true
+post_training_results:
+  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
+experiments:
+  model:
+    name: meta-llama/Llama-3.1-8B
+    type: llama31_classification_lora
+    num_labels: 6
+    output_dir: ./results/llama31-8b-balanced/C1
+    logging_dir: ./logs/llama31-8b-balanced/C1
+    best_model_dir: ./results/llama31-8b-balanced/C1/best_model
+    lora_r: 8
+    lora_dropout: 0.05
+    lora_alpha: 16
+    lora_target_modules: all-linear
+    checkpoint_path: kamel-usp/jbcs2025_llama31_8b-balanced-C1
+  tokenizer:
+    name: meta-llama/Llama-3.1-8B
+  dataset:
+    grade_index: 0
+  training_params:
+    weight_decay: 0.01
+    warmup_ratio: 0.1
+    learning_rate: 5.0e-05
+    train_batch_size: 4
+    eval_batch_size: 4
+    gradient_accumulation_steps: 4
+    gradient_checkpointing: false
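For reference, the LoRA hyperparameters recorded in this config map directly onto a `peft.LoraConfig`. The sketch below is not part of the commit; it only illustrates that mapping under the assumption that the run used the standard `peft` API, and the helper name is hypothetical.

```python
# Minimal sketch (assumption: standard peft API; not taken from the training code).
from peft import LoraConfig, TaskType

def build_lora_config() -> LoraConfig:
    # Values copied from experiments.model in the C1 config.yaml above.
    return LoraConfig(
        task_type=TaskType.SEQ_CLS,   # classification head, num_labels: 6
        r=8,                          # lora_r
        lora_alpha=16,                # lora_alpha
        lora_dropout=0.05,            # lora_dropout
        target_modules="all-linear",  # lora_target_modules
    )
```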
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C1/.hydra/hydra.yaml ADDED
@@ -0,0 +1,155 @@
+hydra:
+  run:
+    dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  sweep:
+    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task: []
+  job:
+    name: run_inference_experiment
+    chdir: null
+    override_dirname: ''
+    id: ???
+    num: ???
+    config_name: config
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.1'
+    cwd: /workspace/jbcs2025
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /workspace/jbcs2025/configs
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /workspace/jbcs2025/outputs/2025-05-26/16-16-08
+    choices:
+      experiments: slm_decoder_models/C1
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
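The hydra.yaml above records how the run was composed: config_name `config`, config directory `/workspace/jbcs2025/configs`, and the config-group choice `experiments: slm_decoder_models/C1`. A minimal sketch of reproducing that composition with Hydra's compose API follows; the relative config path and the `version_base=None` argument are assumptions about where and how such a snippet would run, not something recorded in this commit.

```python
# Minimal sketch (assumption: run from the repository root so that the relative
# path "configs" points at the /workspace/jbcs2025/configs source listed above).
from hydra import compose, initialize

with initialize(config_path="configs", version_base=None):
    cfg = compose(
        config_name="config",                              # hydra.job.config_name
        overrides=["experiments=slm_decoder_models/C1"],   # choice recorded in hydra.yaml
    )

print(cfg.experiments.model.checkpoint_path)  # kamel-usp/jbcs2025_llama31_8b-balanced-C1
```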
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C1/.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
+[]
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C1/Llama-3.1-8B-llama31_classification_lora-C1_inference_results.jsonl ADDED
The diff for this file is too large to render. See raw diff.
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C1/evaluation_results.csv ADDED
@@ -0,0 +1,2 @@
+accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
+0.6884057971014492,25.02172968684897,0.6308698236576183,0.007246376811594235,0.44883890883890876,0.6884057971014492,0.6710623362797277,0,137,0,1,0,138,0,0,7,123,5,3,48,56,16,18,39,66,21,12,1,127,1,9,2025-05-26 16:16:08,Llama-3.1-8B-llama31_classification_lora-C1
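Each evaluation_results.csv in this commit shares one schema: headline metrics (accuracy, RMSE, QWK, HDIV, Macro/Micro/Weighted F1), per-class TP/TN/FP/FN counts, a timestamp, and the experiment id. A small sketch for collecting them into one table is shown below; it is not part of the commit and assumes pandas is available, with a glob pattern mirroring the paths added here.

```python
# Minimal sketch (not part of the commit): gather the headline metrics of every
# run under this folder layout into a single DataFrame.
from pathlib import Path
import pandas as pd

frames = [
    pd.read_csv(path)
    for path in Path("runs/slm_decoder_models/llama-3.1-8b").glob("*/evaluation_results.csv")
]
results = pd.concat(frames, ignore_index=True)

# QWK is the model-selection metric (metric_for_best_model: QWK in config.yaml).
print(results[["id", "accuracy", "QWK", "RMSE", "Macro_F1"]])
```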
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C1/run_inference_experiment.log ADDED
@@ -0,0 +1,127 @@
+[2025-05-26 16:16:08,700][__main__][INFO] - Starting inference experiment
+[2025-05-26 16:16:08,703][__main__][INFO] - cache_dir: /tmp/
+dataset:
+  name: kamel-usp/aes_enem_dataset
+  split: JBCS2025
+training_params:
+  seed: 42
+  num_train_epochs: 20
+  logging_steps: 100
+  metric_for_best_model: QWK
+  bf16: true
+post_training_results:
+  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
+experiments:
+  model:
+    name: meta-llama/Llama-3.1-8B
+    type: llama31_classification_lora
+    num_labels: 6
+    output_dir: ./results/llama31-8b-balanced/C1
+    logging_dir: ./logs/llama31-8b-balanced/C1
+    best_model_dir: ./results/llama31-8b-balanced/C1/best_model
+    lora_r: 8
+    lora_dropout: 0.05
+    lora_alpha: 16
+    lora_target_modules: all-linear
+    checkpoint_path: kamel-usp/jbcs2025_llama31_8b-balanced-C1
+  tokenizer:
+    name: meta-llama/Llama-3.1-8B
+  dataset:
+    grade_index: 0
+  training_params:
+    weight_decay: 0.01
+    warmup_ratio: 0.1
+    learning_rate: 5.0e-05
+    train_batch_size: 4
+    eval_batch_size: 4
+    gradient_accumulation_steps: 4
+    gradient_checkpointing: false
+
+[2025-05-26 16:16:08,706][__main__][INFO] - Running inference with fine-tuned HF model
+[2025-05-26 16:16:17,935][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer.json
+[2025-05-26 16:16:17,935][transformers.tokenization_utils_base][INFO] - loading file tokenizer.model from cache at None
+[2025-05-26 16:16:17,935][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None
+[2025-05-26 16:16:17,936][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/special_tokens_map.json
+[2025-05-26 16:16:17,936][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer_config.json
+[2025-05-26 16:16:17,936][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
+[2025-05-26 16:16:18,462][transformers.tokenization_utils_base][INFO] - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+[2025-05-26 16:16:18,473][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False
+[2025-05-26 16:16:20,526][__main__][INFO] - Loading model from: meta-llama/Llama-3.1-8B
+[2025-05-26 16:16:21,287][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
+[2025-05-26 16:16:21,298][transformers.configuration_utils][INFO] - Model config LlamaConfig {
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 128000,
+  "eos_token_id": 128001,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "id2label": {
+    "0": 0,
+    "1": 40,
+    "2": 80,
+    "3": 120,
+    "4": 160,
+    "5": 200
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "label2id": {
+    "0": 0,
+    "40": 1,
+    "80": 2,
+    "120": 3,
+    "160": 4,
+    "200": 5
+  },
+  "max_position_embeddings": 131072,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "factor": 8.0,
+    "high_freq_factor": 4.0,
+    "low_freq_factor": 1.0,
+    "original_max_position_embeddings": 8192,
+    "rope_type": "llama3"
+  },
+  "rope_theta": 500000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.52.3",
+  "use_cache": true,
+  "vocab_size": 128256
+}
+
+[2025-05-26 16:16:22,660][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/model.safetensors.index.json
+[2025-05-26 16:18:23,717][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object
+[2025-05-26 16:18:23,717][transformers.modeling_utils][INFO] - Instantiating LlamaForSequenceClassification model under default dtype torch.bfloat16.
+[2025-05-26 16:18:28,809][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at meta-llama/Llama-3.1-8B were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']
+- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
+- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
+[2025-05-26 16:18:28,809][transformers.modeling_utils][WARNING] - Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at meta-llama/Llama-3.1-8B and are newly initialized: ['score.weight']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+[2025-05-26 16:18:34,928][__main__][INFO] - Loaded pre-trained PEFT model from kamel-usp/jbcs2025_llama31_8b-balanced-C1
+[2025-05-26 16:18:34,936][__main__][INFO] - None
+[2025-05-26 16:18:34,942][transformers.training_args][INFO] - PyTorch: setting up devices
+[2025-05-26 16:18:34,995][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
+[2025-05-26 16:18:35,017][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
+[2025-05-26 16:18:35,059][transformers.trainer][INFO] - Using auto half precision backend
+[2025-05-26 16:18:35,060][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
+[2025-05-26 16:18:35,061][__main__][INFO] - Running inference on test dataset
+[2025-05-26 16:18:35,062][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_text, essay_year, reference, id_prompt, supporting_text, id, prompt, grades. If essay_text, essay_year, reference, id_prompt, supporting_text, id, prompt, grades are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+[2025-05-26 16:18:35,110][transformers.trainer][INFO] -
+***** Running Prediction *****
+[2025-05-26 16:18:35,110][transformers.trainer][INFO] - Num examples = 138
+[2025-05-26 16:18:35,110][transformers.trainer][INFO] - Batch size = 4
+[2025-05-26 16:19:36,666][transformers][INFO] - {'accuracy': 0.6884057971014492, 'RMSE': 25.02172968684897, 'QWK': 0.6308698236576183, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.44883890883890876, 'Micro_F1': 0.6884057971014492, 'Weighted_F1': 0.6710623362797277, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(7), 'TN_2': np.int64(123), 'FP_2': np.int64(5), 'FN_2': np.int64(3), 'TP_3': np.int64(48), 'TN_3': np.int64(56), 'FP_3': np.int64(16), 'FN_3': np.int64(18), 'TP_4': np.int64(39), 'TN_4': np.int64(66), 'FP_4': np.int64(21), 'FN_4': np.int64(12), 'TP_5': np.int64(1), 'TN_5': np.int64(127), 'FP_5': np.int64(1), 'FN_5': np.int64(9)}
+[2025-05-26 16:19:36,696][__main__][INFO] - Inference results saved to Llama-3.1-8B-llama31_classification_lora-C1_inference_results.jsonl
+[2025-05-26 16:19:36,697][__main__][INFO] - Inference results: {'accuracy': 0.6884057971014492, 'RMSE': 25.02172968684897, 'QWK': 0.6308698236576183, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.44883890883890876, 'Micro_F1': 0.6884057971014492, 'Weighted_F1': 0.6710623362797277, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(7), 'TN_2': np.int64(123), 'FP_2': np.int64(5), 'FN_2': np.int64(3), 'TP_3': np.int64(48), 'TN_3': np.int64(56), 'FP_3': np.int64(16), 'FN_3': np.int64(18), 'TP_4': np.int64(39), 'TN_4': np.int64(66), 'FP_4': np.int64(21), 'FN_4': np.int64(12), 'TP_5': np.int64(1), 'TN_5': np.int64(127), 'FP_5': np.int64(1), 'FN_5': np.int64(9)}
+[2025-05-26 16:19:36,697][__main__][INFO] - Inference experiment completed
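The log above shows the inference path: the base meta-llama/Llama-3.1-8B checkpoint is instantiated as LlamaForSequenceClassification in bfloat16, and the LoRA adapter kamel-usp/jbcs2025_llama31_8b-balanced-C1 is loaded on top of it. The sketch below is a hedged illustration of that loading step using standard transformers + peft calls; the actual run_inference_experiment.py may pass different arguments.

```python
# Minimal sketch (assumption: standard transformers + peft loading; not copied
# from the project's inference script).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel

base_name = "meta-llama/Llama-3.1-8B"
adapter = "kamel-usp/jbcs2025_llama31_8b-balanced-C1"  # checkpoint_path from config.yaml

tokenizer = AutoTokenizer.from_pretrained(base_name)
base = AutoModelForSequenceClassification.from_pretrained(
    base_name,
    num_labels=6,                # num_labels from config.yaml
    torch_dtype=torch.bfloat16,  # matches "Instantiating ... under default dtype torch.bfloat16"
)
model = PeftModel.from_pretrained(base, adapter)  # "Loaded pre-trained PEFT model from ..."
model.eval()
```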
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C2/.hydra/config.yaml ADDED
@@ -0,0 +1,37 @@
+cache_dir: /tmp/
+dataset:
+  name: kamel-usp/aes_enem_dataset
+  split: JBCS2025
+training_params:
+  seed: 42
+  num_train_epochs: 20
+  logging_steps: 100
+  metric_for_best_model: QWK
+  bf16: true
+post_training_results:
+  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
+experiments:
+  model:
+    name: meta-llama/Llama-3.1-8B
+    type: llama31_classification_lora
+    num_labels: 6
+    output_dir: ./results/llama31-8b-balanced/C2
+    logging_dir: ./logs/llama31-8b-balanced/C2
+    best_model_dir: ./results/llama31-8b-balanced/C2/best_model
+    lora_r: 8
+    lora_dropout: 0.05
+    lora_alpha: 16
+    lora_target_modules: all-linear
+    checkpoint_path: kamel-usp/jbcs2025_llama31_8b-balanced-C2
+  tokenizer:
+    name: meta-llama/Llama-3.1-8B
+  dataset:
+    grade_index: 1
+  training_params:
+    weight_decay: 0.01
+    warmup_ratio: 0.1
+    learning_rate: 5.0e-05
+    train_batch_size: 1
+    eval_batch_size: 4
+    gradient_accumulation_steps: 16
+    gradient_checkpointing: false
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C2/.hydra/hydra.yaml ADDED
@@ -0,0 +1,155 @@
+hydra:
+  run:
+    dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  sweep:
+    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task: []
+  job:
+    name: run_inference_experiment
+    chdir: null
+    override_dirname: ''
+    id: ???
+    num: ???
+    config_name: config
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.1'
+    cwd: /workspace/jbcs2025
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /workspace/jbcs2025/configs
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /workspace/jbcs2025/outputs/2025-05-26/16-20-09
+    choices:
+      experiments: slm_decoder_models/C2
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C2/.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
+[]
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C2/Llama-3.1-8B-llama31_classification_lora-C2_inference_results.jsonl ADDED
The diff for this file is too large to render. See raw diff.
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C2/evaluation_results.csv ADDED
@@ -0,0 +1,2 @@
+accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
+0.34782608695652173,67.75916430879359,0.32593250444049726,0.12318840579710144,0.206023539593112,0.34782608695652173,0.3205156603404325,0,137,0,1,24,63,40,11,0,133,0,5,14,69,18,37,8,92,20,18,2,106,12,18,2025-05-26 16:20:09,Llama-3.1-8B-llama31_classification_lora-C2
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C2/run_inference_experiment.log ADDED
@@ -0,0 +1,127 @@
+[2025-05-26 16:20:09,265][__main__][INFO] - Starting inference experiment
+[2025-05-26 16:20:09,267][__main__][INFO] - cache_dir: /tmp/
+dataset:
+  name: kamel-usp/aes_enem_dataset
+  split: JBCS2025
+training_params:
+  seed: 42
+  num_train_epochs: 20
+  logging_steps: 100
+  metric_for_best_model: QWK
+  bf16: true
+post_training_results:
+  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
+experiments:
+  model:
+    name: meta-llama/Llama-3.1-8B
+    type: llama31_classification_lora
+    num_labels: 6
+    output_dir: ./results/llama31-8b-balanced/C2
+    logging_dir: ./logs/llama31-8b-balanced/C2
+    best_model_dir: ./results/llama31-8b-balanced/C2/best_model
+    lora_r: 8
+    lora_dropout: 0.05
+    lora_alpha: 16
+    lora_target_modules: all-linear
+    checkpoint_path: kamel-usp/jbcs2025_llama31_8b-balanced-C2
+  tokenizer:
+    name: meta-llama/Llama-3.1-8B
+  dataset:
+    grade_index: 1
+  training_params:
+    weight_decay: 0.01
+    warmup_ratio: 0.1
+    learning_rate: 5.0e-05
+    train_batch_size: 1
+    eval_batch_size: 4
+    gradient_accumulation_steps: 16
+    gradient_checkpointing: false
+
+[2025-05-26 16:20:09,271][__main__][INFO] - Running inference with fine-tuned HF model
+[2025-05-26 16:20:14,904][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer.json
+[2025-05-26 16:20:14,904][transformers.tokenization_utils_base][INFO] - loading file tokenizer.model from cache at None
+[2025-05-26 16:20:14,904][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None
+[2025-05-26 16:20:14,904][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/special_tokens_map.json
+[2025-05-26 16:20:14,904][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer_config.json
+[2025-05-26 16:20:14,904][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
+[2025-05-26 16:20:15,414][transformers.tokenization_utils_base][INFO] - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+[2025-05-26 16:20:15,424][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False
+[2025-05-26 16:20:18,025][__main__][INFO] - Loading model from: meta-llama/Llama-3.1-8B
+[2025-05-26 16:20:18,277][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
+[2025-05-26 16:20:18,286][transformers.configuration_utils][INFO] - Model config LlamaConfig {
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 128000,
+  "eos_token_id": 128001,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "id2label": {
+    "0": 0,
+    "1": 40,
+    "2": 80,
+    "3": 120,
+    "4": 160,
+    "5": 200
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "label2id": {
+    "0": 0,
+    "40": 1,
+    "80": 2,
+    "120": 3,
+    "160": 4,
+    "200": 5
+  },
+  "max_position_embeddings": 131072,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "factor": 8.0,
+    "high_freq_factor": 4.0,
+    "low_freq_factor": 1.0,
+    "original_max_position_embeddings": 8192,
+    "rope_type": "llama3"
+  },
+  "rope_theta": 500000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.52.3",
+  "use_cache": true,
+  "vocab_size": 128256
+}
+
+[2025-05-26 16:20:18,532][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/model.safetensors.index.json
+[2025-05-26 16:20:18,533][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object
+[2025-05-26 16:20:18,533][transformers.modeling_utils][INFO] - Instantiating LlamaForSequenceClassification model under default dtype torch.bfloat16.
+[2025-05-26 16:20:24,096][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at meta-llama/Llama-3.1-8B were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']
+- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
+- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
+[2025-05-26 16:20:24,097][transformers.modeling_utils][WARNING] - Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at meta-llama/Llama-3.1-8B and are newly initialized: ['score.weight']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+[2025-05-26 16:20:30,149][__main__][INFO] - Loaded pre-trained PEFT model from kamel-usp/jbcs2025_llama31_8b-balanced-C2
+[2025-05-26 16:20:30,155][__main__][INFO] - None
+[2025-05-26 16:20:30,159][transformers.training_args][INFO] - PyTorch: setting up devices
+[2025-05-26 16:20:30,209][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
+[2025-05-26 16:20:30,226][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
+[2025-05-26 16:20:30,263][transformers.trainer][INFO] - Using auto half precision backend
+[2025-05-26 16:20:30,264][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
+[2025-05-26 16:20:30,264][__main__][INFO] - Running inference on test dataset
+[2025-05-26 16:20:30,265][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id, grades, prompt, essay_text, id_prompt, essay_year, supporting_text, reference. If id, grades, prompt, essay_text, id_prompt, essay_year, supporting_text, reference are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+[2025-05-26 16:20:30,303][transformers.trainer][INFO] -
+***** Running Prediction *****
+[2025-05-26 16:20:30,303][transformers.trainer][INFO] - Num examples = 138
+[2025-05-26 16:20:30,303][transformers.trainer][INFO] - Batch size = 4
+[2025-05-26 16:21:58,661][transformers][INFO] - {'accuracy': 0.34782608695652173, 'RMSE': 67.75916430879359, 'QWK': 0.32593250444049726, 'HDIV': 0.12318840579710144, 'Macro_F1': 0.206023539593112, 'Micro_F1': 0.34782608695652173, 'Weighted_F1': 0.3205156603404325, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(24), 'TN_1': np.int64(63), 'FP_1': np.int64(40), 'FN_1': np.int64(11), 'TP_2': np.int64(0), 'TN_2': np.int64(133), 'FP_2': np.int64(0), 'FN_2': np.int64(5), 'TP_3': np.int64(14), 'TN_3': np.int64(69), 'FP_3': np.int64(18), 'FN_3': np.int64(37), 'TP_4': np.int64(8), 'TN_4': np.int64(92), 'FP_4': np.int64(20), 'FN_4': np.int64(18), 'TP_5': np.int64(2), 'TN_5': np.int64(106), 'FP_5': np.int64(12), 'FN_5': np.int64(18)}
+[2025-05-26 16:21:58,683][__main__][INFO] - Inference results saved to Llama-3.1-8B-llama31_classification_lora-C2_inference_results.jsonl
+[2025-05-26 16:21:58,684][__main__][INFO] - Inference results: {'accuracy': 0.34782608695652173, 'RMSE': 67.75916430879359, 'QWK': 0.32593250444049726, 'HDIV': 0.12318840579710144, 'Macro_F1': 0.206023539593112, 'Micro_F1': 0.34782608695652173, 'Weighted_F1': 0.3205156603404325, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(24), 'TN_1': np.int64(63), 'FP_1': np.int64(40), 'FN_1': np.int64(11), 'TP_2': np.int64(0), 'TN_2': np.int64(133), 'FP_2': np.int64(0), 'FN_2': np.int64(5), 'TP_3': np.int64(14), 'TN_3': np.int64(69), 'FP_3': np.int64(18), 'FN_3': np.int64(37), 'TP_4': np.int64(8), 'TN_4': np.int64(92), 'FP_4': np.int64(20), 'FN_4': np.int64(18), 'TP_5': np.int64(2), 'TN_5': np.int64(106), 'FP_5': np.int64(12), 'FN_5': np.int64(18)}
+[2025-05-26 16:21:58,684][__main__][INFO] - Inference experiment completed
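As the LlamaConfig dump in these logs shows, the six classifier indices map to the ENEM competency scores 0, 40, 80, 120, 160 and 200 through id2label, which is consistent with the RMSE values being reported on the 0-200 grade scale. A small illustrative sketch of converting predicted class indices back to scores (not taken from the project code):

```python
# Minimal sketch: map argmax class indices to ENEM competency scores using the
# id2label mapping printed in the run logs (0 -> 0, 1 -> 40, ..., 5 -> 200).
ID2SCORE = {0: 0, 1: 40, 2: 80, 3: 120, 4: 160, 5: 200}

def indices_to_scores(class_indices):
    return [ID2SCORE[int(i)] for i in class_indices]

assert indices_to_scores([0, 3, 5]) == [0, 120, 200]
```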
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C3/.hydra/config.yaml ADDED
@@ -0,0 +1,37 @@
+cache_dir: /tmp/
+dataset:
+  name: kamel-usp/aes_enem_dataset
+  split: JBCS2025
+training_params:
+  seed: 42
+  num_train_epochs: 20
+  logging_steps: 100
+  metric_for_best_model: QWK
+  bf16: true
+post_training_results:
+  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
+experiments:
+  model:
+    name: meta-llama/Llama-3.1-8B
+    type: llama31_classification_lora
+    num_labels: 6
+    output_dir: ./results/llama31-8b-balanced/C3
+    logging_dir: ./logs/llama31-8b-balanced/C3
+    best_model_dir: ./results/llama31-8b-balanced/C3/best_model
+    lora_r: 8
+    lora_dropout: 0.05
+    lora_alpha: 16
+    lora_target_modules: all-linear
+    checkpoint_path: kamel-usp/jbcs2025_llama31_8b-balanced-C3
+  tokenizer:
+    name: meta-llama/Llama-3.1-8B
+  dataset:
+    grade_index: 2
+  training_params:
+    weight_decay: 0.01
+    warmup_ratio: 0.1
+    learning_rate: 5.0e-05
+    train_batch_size: 1
+    eval_batch_size: 2
+    gradient_accumulation_steps: 16
+    gradient_checkpointing: true
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C3/.hydra/hydra.yaml ADDED
@@ -0,0 +1,155 @@
+hydra:
+  run:
+    dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  sweep:
+    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task: []
+  job:
+    name: run_inference_experiment
+    chdir: null
+    override_dirname: ''
+    id: ???
+    num: ???
+    config_name: config
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.1'
+    cwd: /workspace/jbcs2025
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /workspace/jbcs2025/configs
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /workspace/jbcs2025/outputs/2025-05-26/16-24-50
+    choices:
+      experiments: slm_decoder_models/C3
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C3/.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
+[]
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C3/Llama-3.1-8B-llama31_classification_lora-C3_inference_results.jsonl ADDED
The diff for this file is too large to render. See raw diff.
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C3/evaluation_results.csv ADDED
@@ -0,0 +1,2 @@
+accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
+0.41304347826086957,49.22589162013322,0.378860317870526,0.050724637681159424,0.26816705521034756,0.41304347826086957,0.38577074089443025,0,137,0,1,4,101,8,25,8,112,8,10,24,59,34,21,21,71,29,17,0,129,2,7,2025-05-26 16:24:50,Llama-3.1-8B-llama31_classification_lora-C3
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C3/run_inference_experiment.log ADDED
@@ -0,0 +1,127 @@
+[2025-05-26 16:24:50,293][__main__][INFO] - Starting inference experiment
+[2025-05-26 16:24:50,296][__main__][INFO] - cache_dir: /tmp/
+dataset:
+  name: kamel-usp/aes_enem_dataset
+  split: JBCS2025
+training_params:
+  seed: 42
+  num_train_epochs: 20
+  logging_steps: 100
+  metric_for_best_model: QWK
+  bf16: true
+post_training_results:
+  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
+experiments:
+  model:
+    name: meta-llama/Llama-3.1-8B
+    type: llama31_classification_lora
+    num_labels: 6
+    output_dir: ./results/llama31-8b-balanced/C3
+    logging_dir: ./logs/llama31-8b-balanced/C3
+    best_model_dir: ./results/llama31-8b-balanced/C3/best_model
+    lora_r: 8
+    lora_dropout: 0.05
+    lora_alpha: 16
+    lora_target_modules: all-linear
+    checkpoint_path: kamel-usp/jbcs2025_llama31_8b-balanced-C3
+  tokenizer:
+    name: meta-llama/Llama-3.1-8B
+  dataset:
+    grade_index: 2
+  training_params:
+    weight_decay: 0.01
+    warmup_ratio: 0.1
+    learning_rate: 5.0e-05
+    train_batch_size: 1
+    eval_batch_size: 2
+    gradient_accumulation_steps: 16
+    gradient_checkpointing: true
+
+[2025-05-26 16:24:50,299][__main__][INFO] - Running inference with fine-tuned HF model
+[2025-05-26 16:24:57,006][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer.json
+[2025-05-26 16:24:57,006][transformers.tokenization_utils_base][INFO] - loading file tokenizer.model from cache at None
+[2025-05-26 16:24:57,006][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None
+[2025-05-26 16:24:57,006][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/special_tokens_map.json
+[2025-05-26 16:24:57,006][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer_config.json
+[2025-05-26 16:24:57,007][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
+[2025-05-26 16:24:57,516][transformers.tokenization_utils_base][INFO] - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+[2025-05-26 16:24:57,526][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False
+[2025-05-26 16:24:59,764][__main__][INFO] - Loading model from: meta-llama/Llama-3.1-8B
+[2025-05-26 16:25:00,022][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
+[2025-05-26 16:25:00,033][transformers.configuration_utils][INFO] - Model config LlamaConfig {
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 128000,
+  "eos_token_id": 128001,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "id2label": {
+    "0": 0,
+    "1": 40,
+    "2": 80,
+    "3": 120,
+    "4": 160,
+    "5": 200
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "label2id": {
+    "0": 0,
+    "40": 1,
+    "80": 2,
+    "120": 3,
+    "160": 4,
+    "200": 5
+  },
+  "max_position_embeddings": 131072,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "factor": 8.0,
+    "high_freq_factor": 4.0,
+    "low_freq_factor": 1.0,
+    "original_max_position_embeddings": 8192,
+    "rope_type": "llama3"
+  },
+  "rope_theta": 500000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.52.3",
+  "use_cache": true,
+  "vocab_size": 128256
+}
+
+[2025-05-26 16:25:00,279][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/model.safetensors.index.json
+[2025-05-26 16:25:00,280][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object
+[2025-05-26 16:25:00,280][transformers.modeling_utils][INFO] - Instantiating LlamaForSequenceClassification model under default dtype torch.bfloat16.
+[2025-05-26 16:25:05,352][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at meta-llama/Llama-3.1-8B were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']
+- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
+- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
+[2025-05-26 16:25:05,352][transformers.modeling_utils][WARNING] - Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at meta-llama/Llama-3.1-8B and are newly initialized: ['score.weight']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+[2025-05-26 16:25:11,342][__main__][INFO] - Loaded pre-trained PEFT model from kamel-usp/jbcs2025_llama31_8b-balanced-C3
+[2025-05-26 16:25:11,349][__main__][INFO] - None
+[2025-05-26 16:25:11,354][transformers.training_args][INFO] - PyTorch: setting up devices
+[2025-05-26 16:25:11,408][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
+[2025-05-26 16:25:11,429][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
+[2025-05-26 16:25:11,473][transformers.trainer][INFO] - Using auto half precision backend
+[2025-05-26 16:25:11,474][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
+[2025-05-26 16:25:11,474][__main__][INFO] - Running inference on test dataset
+[2025-05-26 16:25:11,475][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id, grades, prompt, supporting_text, essay_year, id_prompt, reference, essay_text. If id, grades, prompt, supporting_text, essay_year, id_prompt, reference, essay_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+[2025-05-26 16:25:11,514][transformers.trainer][INFO] -
+***** Running Prediction *****
+[2025-05-26 16:25:11,514][transformers.trainer][INFO] - Num examples = 138
+[2025-05-26 16:25:11,514][transformers.trainer][INFO] - Batch size = 2
+[2025-05-26 16:26:21,368][transformers][INFO] - {'accuracy': 0.41304347826086957, 'RMSE': 49.22589162013322, 'QWK': 0.378860317870526, 'HDIV': 0.050724637681159424, 'Macro_F1': 0.26816705521034756, 'Micro_F1': 0.41304347826086957, 'Weighted_F1': 0.38577074089443025, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(4), 'TN_1': np.int64(101), 'FP_1': np.int64(8), 'FN_1': np.int64(25), 'TP_2': np.int64(8), 'TN_2': np.int64(112), 'FP_2': np.int64(8), 'FN_2': np.int64(10), 'TP_3': np.int64(24), 'TN_3': np.int64(59), 'FP_3': np.int64(34), 'FN_3': np.int64(21), 'TP_4': np.int64(21), 'TN_4': np.int64(71), 'FP_4': np.int64(29), 'FN_4': np.int64(17), 'TP_5': np.int64(0), 'TN_5': np.int64(129), 'FP_5': np.int64(2), 'FN_5': np.int64(7)}
|
| 125 |
+
[2025-05-26 16:26:21,390][__main__][INFO] - Inference results saved to Llama-3.1-8B-llama31_classification_lora-C3_inference_results.jsonl
|
| 126 |
+
[2025-05-26 16:26:21,391][__main__][INFO] - Inference results: {'accuracy': 0.41304347826086957, 'RMSE': 49.22589162013322, 'QWK': 0.378860317870526, 'HDIV': 0.050724637681159424, 'Macro_F1': 0.26816705521034756, 'Micro_F1': 0.41304347826086957, 'Weighted_F1': 0.38577074089443025, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(4), 'TN_1': np.int64(101), 'FP_1': np.int64(8), 'FN_1': np.int64(25), 'TP_2': np.int64(8), 'TN_2': np.int64(112), 'FP_2': np.int64(8), 'FN_2': np.int64(10), 'TP_3': np.int64(24), 'TN_3': np.int64(59), 'FP_3': np.int64(34), 'FN_3': np.int64(21), 'TP_4': np.int64(21), 'TN_4': np.int64(71), 'FP_4': np.int64(29), 'FN_4': np.int64(17), 'TP_5': np.int64(0), 'TN_5': np.int64(129), 'FP_5': np.int64(2), 'FN_5': np.int64(7)}
|
| 127 |
+
[2025-05-26 16:26:21,391][__main__][INFO] - Inference experiment completed
|
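The confusion counts logged above are enough to sanity-check the headline numbers for the C3 run: accuracy is the sum of true positives over the 138 test essays, and per-class F1 follows directly from TP/FP/FN. A minimal check (counts copied from the log; the script itself is illustrative, not part of the repo):

# Sanity-check the C3 metrics reported in the log above.
tp = [0, 4, 8, 24, 21, 0]
fp = [0, 8, 8, 34, 29, 2]
fn = [1, 25, 10, 21, 17, 7]

n_examples = sum(tp) + sum(fn)      # each essay is a TP or an FN of its true class -> 138
accuracy = sum(tp) / n_examples     # 57 / 138 = 0.413043..., matching the logged accuracy

f1 = [2 * t / (2 * t + f + m) if t else 0.0 for t, f, m in zip(tp, fp, fn)]
macro_f1 = sum(f1) / len(f1)        # = 0.2682, matching the logged Macro_F1

print(f"accuracy={accuracy:.6f}  macro_f1={macro_f1:.6f}")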
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C4/.hydra/config.yaml
ADDED
@@ -0,0 +1,37 @@
cache_dir: /tmp/
dataset:
  name: kamel-usp/aes_enem_dataset
  split: JBCS2025
training_params:
  seed: 42
  num_train_epochs: 20
  logging_steps: 100
  metric_for_best_model: QWK
  bf16: true
post_training_results:
  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
experiments:
  model:
    name: meta-llama/Llama-3.1-8B
    type: llama31_classification_lora
    num_labels: 6
    output_dir: ./results/llama31-8b-balanced/C4
    logging_dir: ./logs/llama31-8b-balanced/C4
    best_model_dir: ./results/llama31-8b-balanced/C4/best_model
    lora_r: 8
    lora_dropout: 0.05
    lora_alpha: 16
    lora_target_modules: all-linear
    checkpoint_path: kamel-usp/jbcs2025_llama31_8b-balanced-C4
  tokenizer:
    name: meta-llama/Llama-3.1-8B
  dataset:
    grade_index: 3
  training_params:
    weight_decay: 0.01
    warmup_ratio: 0.1
    learning_rate: 5.0e-05
    train_batch_size: 2
    eval_batch_size: 4
    gradient_accumulation_steps: 8
    gradient_checkpointing: false
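The C4 config above maps directly onto a LoRA sequence-classification setup. A minimal sketch of how such a checkpoint could be reassembled for inference with the standard transformers/peft APIs (this is an assumption about the loading flow; the actual run_inference_experiment.py code is not shown in this commit):

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel

BASE = "meta-llama/Llama-3.1-8B"
ADAPTER = "kamel-usp/jbcs2025_llama31_8b-balanced-C4"   # experiments.model.checkpoint_path

tokenizer = AutoTokenizer.from_pretrained(BASE, cache_dir="/tmp/")
base = AutoModelForSequenceClassification.from_pretrained(
    BASE,
    num_labels=6,                 # experiments.model.num_labels
    torch_dtype=torch.bfloat16,   # bf16: true
    cache_dir="/tmp/",
)
# Assumption: Llama has no pad token by default, so one is borrowed from eos for padding=longest.
base.config.pad_token_id = tokenizer.eos_token_id
model = PeftModel.from_pretrained(base, ADAPTER)  # attaches the LoRA adapter (r=8, alpha=16, dropout=0.05)
model.eval()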
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C4/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,155 @@
hydra:
  run:
    dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
  sweep:
    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][HYDRA] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    loggers:
      logging_example:
        level: DEBUG
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=RUN
    task: []
  job:
    name: run_inference_experiment
    chdir: null
    override_dirname: ''
    id: ???
    num: ???
    config_name: config
    env_set: {}
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.1'
    cwd: /workspace/jbcs2025
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: /workspace/jbcs2025/configs
      schema: file
      provider: main
    - path: ''
      schema: structured
      provider: schema
    output_dir: /workspace/jbcs2025/outputs/2025-05-26/16-30-26
    choices:
      experiments: slm_decoder_models/C4
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: default
      hydra/hydra_logging: default
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
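The hydra.yaml records how this run was composed (config_name=config, job name run_inference_experiment, experiments group resolved to slm_decoder_models/C4). A rough way to re-create the same composed config outside the launcher, assuming the configs/ directory recorded under config_sources is available in a local checkout, is Hydra's compose API:

from hydra import initialize_config_dir, compose
from omegaconf import OmegaConf

# Path taken from hydra.runtime.config_sources above; adjust to your checkout.
with initialize_config_dir(config_dir="/workspace/jbcs2025/configs", version_base="1.1"):
    cfg = compose(
        config_name="config",
        overrides=["experiments=slm_decoder_models/C4"],  # mirrors hydra.runtime.choices
    )
print(OmegaConf.to_yaml(cfg))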
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C4/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
[]
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C4/Llama-3.1-8B-llama31_classification_lora-C4_inference_results.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C4/evaluation_results.csv
ADDED
@@ -0,0 +1,2 @@
accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
0.6521739130434783,28.078564726521353,0.5181762168823167,0.007246376811594235,0.31756740187062565,0.6521739130434783,0.6518531929244537,0,137,0,1,0,137,0,1,5,124,5,4,53,45,17,23,32,72,20,14,0,127,6,5,2025-05-26 16:30:26,Llama-3.1-8B-llama31_classification_lora-C4
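evaluation_results.csv flattens the per-class confusion counts into a single row. A short sketch (pandas assumed available) that re-derives Macro_F1 and Weighted_F1 from those counts and reproduces the 0.3176 / 0.6519 values in the row above:

import pandas as pd

df = pd.read_csv("evaluation_results.csv")
row = df.iloc[0]

f1s, supports = [], []
for c in range(6):
    tp, fp, fn = row[f"TP_{c}"], row[f"FP_{c}"], row[f"FN_{c}"]
    f1s.append(2 * tp / (2 * tp + fp + fn) if tp else 0.0)
    supports.append(tp + fn)  # number of true examples of class c

macro_f1 = sum(f1s) / len(f1s)                                            # = 0.3176
weighted_f1 = sum(f * s for f, s in zip(f1s, supports)) / sum(supports)   # = 0.6519
print(macro_f1, weighted_f1)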
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C4/run_inference_experiment.log
ADDED
@@ -0,0 +1,127 @@
[2025-05-26 16:30:26,246][__main__][INFO] - Starting inference experiment
[2025-05-26 16:30:26,248][__main__][INFO] - cache_dir: /tmp/
dataset:
  name: kamel-usp/aes_enem_dataset
  split: JBCS2025
training_params:
  seed: 42
  num_train_epochs: 20
  logging_steps: 100
  metric_for_best_model: QWK
  bf16: true
post_training_results:
  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
experiments:
  model:
    name: meta-llama/Llama-3.1-8B
    type: llama31_classification_lora
    num_labels: 6
    output_dir: ./results/llama31-8b-balanced/C4
    logging_dir: ./logs/llama31-8b-balanced/C4
    best_model_dir: ./results/llama31-8b-balanced/C4/best_model
    lora_r: 8
    lora_dropout: 0.05
    lora_alpha: 16
    lora_target_modules: all-linear
    checkpoint_path: kamel-usp/jbcs2025_llama31_8b-balanced-C4
  tokenizer:
    name: meta-llama/Llama-3.1-8B
  dataset:
    grade_index: 3
  training_params:
    weight_decay: 0.01
    warmup_ratio: 0.1
    learning_rate: 5.0e-05
    train_batch_size: 2
    eval_batch_size: 4
    gradient_accumulation_steps: 8
    gradient_checkpointing: false

[2025-05-26 16:30:26,252][__main__][INFO] - Running inference with fine-tuned HF model
[2025-05-26 16:30:32,032][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer.json
[2025-05-26 16:30:32,033][transformers.tokenization_utils_base][INFO] - loading file tokenizer.model from cache at None
[2025-05-26 16:30:32,033][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None
[2025-05-26 16:30:32,033][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/special_tokens_map.json
[2025-05-26 16:30:32,033][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer_config.json
[2025-05-26 16:30:32,033][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
[2025-05-26 16:30:32,543][transformers.tokenization_utils_base][INFO] - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
[2025-05-26 16:30:32,554][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False
[2025-05-26 16:30:34,754][__main__][INFO] - Loading model from: meta-llama/Llama-3.1-8B
[2025-05-26 16:30:34,999][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
[2025-05-26 16:30:35,010][transformers.configuration_utils][INFO] - Model config LlamaConfig {
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 128000,
  "eos_token_id": 128001,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "id2label": {
    "0": 0,
    "1": 40,
    "2": 80,
    "3": 120,
    "4": 160,
    "5": 200
  },
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "label2id": {
    "0": 0,
    "40": 1,
    "80": 2,
    "120": 3,
    "160": 4,
    "200": 5
  },
  "max_position_embeddings": 131072,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "factor": 8.0,
    "high_freq_factor": 4.0,
    "low_freq_factor": 1.0,
    "original_max_position_embeddings": 8192,
    "rope_type": "llama3"
  },
  "rope_theta": 500000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.52.3",
  "use_cache": true,
  "vocab_size": 128256
}

[2025-05-26 16:30:35,252][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/model.safetensors.index.json
[2025-05-26 16:30:35,253][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object
[2025-05-26 16:30:35,253][transformers.modeling_utils][INFO] - Instantiating LlamaForSequenceClassification model under default dtype torch.bfloat16.
[2025-05-26 16:30:40,299][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at meta-llama/Llama-3.1-8B were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']
- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
[2025-05-26 16:30:40,300][transformers.modeling_utils][WARNING] - Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at meta-llama/Llama-3.1-8B and are newly initialized: ['score.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
[2025-05-26 16:30:46,631][__main__][INFO] - Loaded pre-trained PEFT model from kamel-usp/jbcs2025_llama31_8b-balanced-C4
[2025-05-26 16:30:46,637][__main__][INFO] - None
[2025-05-26 16:30:46,642][transformers.training_args][INFO] - PyTorch: setting up devices
[2025-05-26 16:30:46,694][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
[2025-05-26 16:30:46,713][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
[2025-05-26 16:30:46,751][transformers.trainer][INFO] - Using auto half precision backend
[2025-05-26 16:30:46,752][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
[2025-05-26 16:30:46,752][__main__][INFO] - Running inference on test dataset
[2025-05-26 16:30:46,754][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: reference, grades, id_prompt, supporting_text, essay_text, id, prompt, essay_year. If reference, grades, id_prompt, supporting_text, essay_text, id, prompt, essay_year are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-05-26 16:30:46,792][transformers.trainer][INFO] -
***** Running Prediction *****
[2025-05-26 16:30:46,792][transformers.trainer][INFO] - Num examples = 138
[2025-05-26 16:30:46,792][transformers.trainer][INFO] - Batch size = 4
[2025-05-26 16:31:56,287][transformers][INFO] - {'accuracy': 0.6521739130434783, 'RMSE': 28.078564726521353, 'QWK': 0.5181762168823167, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.31756740187062565, 'Micro_F1': 0.6521739130434783, 'Weighted_F1': 0.6518531929244537, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(137), 'FP_1': np.int64(0), 'FN_1': np.int64(1), 'TP_2': np.int64(5), 'TN_2': np.int64(124), 'FP_2': np.int64(5), 'FN_2': np.int64(4), 'TP_3': np.int64(53), 'TN_3': np.int64(45), 'FP_3': np.int64(17), 'FN_3': np.int64(23), 'TP_4': np.int64(32), 'TN_4': np.int64(72), 'FP_4': np.int64(20), 'FN_4': np.int64(14), 'TP_5': np.int64(0), 'TN_5': np.int64(127), 'FP_5': np.int64(6), 'FN_5': np.int64(5)}
[2025-05-26 16:31:56,314][__main__][INFO] - Inference results saved to Llama-3.1-8B-llama31_classification_lora-C4_inference_results.jsonl
[2025-05-26 16:31:56,315][__main__][INFO] - Inference results: {'accuracy': 0.6521739130434783, 'RMSE': 28.078564726521353, 'QWK': 0.5181762168823167, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.31756740187062565, 'Micro_F1': 0.6521739130434783, 'Weighted_F1': 0.6518531929244537, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(137), 'FP_1': np.int64(0), 'FN_1': np.int64(1), 'TP_2': np.int64(5), 'TN_2': np.int64(124), 'FP_2': np.int64(5), 'FN_2': np.int64(4), 'TP_3': np.int64(53), 'TN_3': np.int64(45), 'FP_3': np.int64(17), 'FN_3': np.int64(23), 'TP_4': np.int64(32), 'TN_4': np.int64(72), 'FP_4': np.int64(20), 'FN_4': np.int64(14), 'TP_5': np.int64(0), 'TN_5': np.int64(127), 'FP_5': np.int64(6), 'FN_5': np.int64(5)}
[2025-05-26 16:31:56,315][__main__][INFO] - Inference experiment completed
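The prediction block in the C4 log (138 examples, batch size 4, unused text columns dropped automatically) corresponds to a plain Trainer.predict pass. A hedged sketch of that step, reusing the model and tokenizer objects from the loading sketch above and assuming a tokenized test split named test_ds (the names are illustrative, not taken from the repo):

import numpy as np
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="./results/llama31-8b-balanced/C4",
    per_device_eval_batch_size=4,   # matches "Batch size = 4" in the log
    bf16=True,
    report_to="none",
)
trainer = Trainer(model=model, args=args, processing_class=tokenizer)

# test_ds must already carry input_ids/attention_mask/labels; extra text columns
# (essay_text, prompt, ...) are ignored by Trainer, as the log message notes.
pred = trainer.predict(test_ds)
class_idx = np.argmax(pred.predictions, axis=-1)
id2label = {0: 0, 1: 40, 2: 80, 3: 120, 4: 160, 5: 200}  # copied from the logged config
grades = [id2label[int(i)] for i in class_idx]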
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C5/.hydra/config.yaml
ADDED
@@ -0,0 +1,37 @@
cache_dir: /tmp/
dataset:
  name: kamel-usp/aes_enem_dataset
  split: JBCS2025
training_params:
  seed: 42
  num_train_epochs: 20
  logging_steps: 100
  metric_for_best_model: QWK
  bf16: true
post_training_results:
  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
experiments:
  model:
    name: meta-llama/Llama-3.1-8B
    type: llama31_classification_lora
    num_labels: 6
    output_dir: ./results/llama31-8b-balanced/C5
    logging_dir: ./logs/llama31-8b-balanced/C5
    best_model_dir: ./results/llama31-8b-balanced/C5/best_model
    lora_r: 8
    lora_dropout: 0.05
    lora_alpha: 16
    lora_target_modules: all-linear
    checkpoint_path: kamel-usp/jbcs2025_llama31_8b-balanced-C5
  tokenizer:
    name: meta-llama/Llama-3.1-8B
  dataset:
    grade_index: 4
  training_params:
    weight_decay: 0.01
    warmup_ratio: 0.1
    learning_rate: 5.0e-05
    train_batch_size: 2
    eval_batch_size: 4
    gradient_accumulation_steps: 8
    gradient_checkpointing: false
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C5/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,155 @@
hydra:
  run:
    dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
  sweep:
    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][HYDRA] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    loggers:
      logging_example:
        level: DEBUG
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=RUN
    task: []
  job:
    name: run_inference_experiment
    chdir: null
    override_dirname: ''
    id: ???
    num: ???
    config_name: config
    env_set: {}
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.1'
    cwd: /workspace/jbcs2025
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: /workspace/jbcs2025/configs
      schema: file
      provider: main
    - path: ''
      schema: structured
      provider: schema
    output_dir: /workspace/jbcs2025/outputs/2025-05-26/16-32-56
    choices:
      experiments: slm_decoder_models/C5
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: default
      hydra/hydra_logging: default
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C5/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
[]
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C5/Llama-3.1-8B-llama31_classification_lora-C5_inference_results.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C5/evaluation_results.csv
ADDED
@@ -0,0 +1,2 @@
accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
0.2753623188405797,61.10100926607787,0.4094726770146714,0.09420289855072461,0.2742797540739807,0.2753623188405797,0.2624605552578044,9,101,15,13,8,84,22,24,7,93,21,17,10,81,32,15,3,99,7,29,1,132,3,2,2025-05-26 16:32:56,Llama-3.1-8B-llama31_classification_lora-C5
runs/slm_decoder_models/llama-3.1-8b/Llama-3.1-8B-llama31_classification_lora-C5/run_inference_experiment.log
ADDED
@@ -0,0 +1,127 @@
[2025-05-26 16:32:56,078][__main__][INFO] - Starting inference experiment
[2025-05-26 16:32:56,080][__main__][INFO] - cache_dir: /tmp/
dataset:
  name: kamel-usp/aes_enem_dataset
  split: JBCS2025
training_params:
  seed: 42
  num_train_epochs: 20
  logging_steps: 100
  metric_for_best_model: QWK
  bf16: true
post_training_results:
  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
experiments:
  model:
    name: meta-llama/Llama-3.1-8B
    type: llama31_classification_lora
    num_labels: 6
    output_dir: ./results/llama31-8b-balanced/C5
    logging_dir: ./logs/llama31-8b-balanced/C5
    best_model_dir: ./results/llama31-8b-balanced/C5/best_model
    lora_r: 8
    lora_dropout: 0.05
    lora_alpha: 16
    lora_target_modules: all-linear
    checkpoint_path: kamel-usp/jbcs2025_llama31_8b-balanced-C5
  tokenizer:
    name: meta-llama/Llama-3.1-8B
  dataset:
    grade_index: 4
  training_params:
    weight_decay: 0.01
    warmup_ratio: 0.1
    learning_rate: 5.0e-05
    train_batch_size: 2
    eval_batch_size: 4
    gradient_accumulation_steps: 8
    gradient_checkpointing: false

[2025-05-26 16:32:56,084][__main__][INFO] - Running inference with fine-tuned HF model
[2025-05-26 16:33:02,366][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer.json
[2025-05-26 16:33:02,367][transformers.tokenization_utils_base][INFO] - loading file tokenizer.model from cache at None
[2025-05-26 16:33:02,367][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None
[2025-05-26 16:33:02,367][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/special_tokens_map.json
[2025-05-26 16:33:02,367][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer_config.json
[2025-05-26 16:33:02,367][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
[2025-05-26 16:33:02,878][transformers.tokenization_utils_base][INFO] - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
[2025-05-26 16:33:02,888][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False
[2025-05-26 16:33:05,206][__main__][INFO] - Loading model from: meta-llama/Llama-3.1-8B
[2025-05-26 16:33:05,484][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
[2025-05-26 16:33:05,494][transformers.configuration_utils][INFO] - Model config LlamaConfig {
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 128000,
  "eos_token_id": 128001,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "id2label": {
    "0": 0,
    "1": 40,
    "2": 80,
    "3": 120,
    "4": 160,
    "5": 200
  },
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "label2id": {
    "0": 0,
    "40": 1,
    "80": 2,
    "120": 3,
    "160": 4,
    "200": 5
  },
  "max_position_embeddings": 131072,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "factor": 8.0,
    "high_freq_factor": 4.0,
    "low_freq_factor": 1.0,
    "original_max_position_embeddings": 8192,
    "rope_type": "llama3"
  },
  "rope_theta": 500000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.52.3",
  "use_cache": true,
  "vocab_size": 128256
}

[2025-05-26 16:33:05,735][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/model.safetensors.index.json
[2025-05-26 16:33:05,736][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object
[2025-05-26 16:33:05,736][transformers.modeling_utils][INFO] - Instantiating LlamaForSequenceClassification model under default dtype torch.bfloat16.
[2025-05-26 16:33:10,716][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at meta-llama/Llama-3.1-8B were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']
- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
[2025-05-26 16:33:10,716][transformers.modeling_utils][WARNING] - Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at meta-llama/Llama-3.1-8B and are newly initialized: ['score.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
[2025-05-26 16:33:16,736][__main__][INFO] - Loaded pre-trained PEFT model from kamel-usp/jbcs2025_llama31_8b-balanced-C5
[2025-05-26 16:33:16,742][__main__][INFO] - None
[2025-05-26 16:33:16,746][transformers.training_args][INFO] - PyTorch: setting up devices
[2025-05-26 16:33:16,798][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
[2025-05-26 16:33:16,818][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
[2025-05-26 16:33:16,859][transformers.trainer][INFO] - Using auto half precision backend
[2025-05-26 16:33:16,860][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
[2025-05-26 16:33:16,861][__main__][INFO] - Running inference on test dataset
[2025-05-26 16:33:16,862][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_text, reference, supporting_text, essay_year, id_prompt, prompt, id, grades. If essay_text, reference, supporting_text, essay_year, id_prompt, prompt, id, grades are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-05-26 16:33:16,900][transformers.trainer][INFO] -
***** Running Prediction *****
[2025-05-26 16:33:16,900][transformers.trainer][INFO] - Num examples = 138
[2025-05-26 16:33:16,900][transformers.trainer][INFO] - Batch size = 4
[2025-05-26 16:34:30,892][transformers][INFO] - {'accuracy': 0.2753623188405797, 'RMSE': 61.10100926607787, 'QWK': 0.4094726770146714, 'HDIV': 0.09420289855072461, 'Macro_F1': 0.2742797540739807, 'Micro_F1': 0.2753623188405797, 'Weighted_F1': 0.2624605552578044, 'TP_0': np.int64(9), 'TN_0': np.int64(101), 'FP_0': np.int64(15), 'FN_0': np.int64(13), 'TP_1': np.int64(8), 'TN_1': np.int64(84), 'FP_1': np.int64(22), 'FN_1': np.int64(24), 'TP_2': np.int64(7), 'TN_2': np.int64(93), 'FP_2': np.int64(21), 'FN_2': np.int64(17), 'TP_3': np.int64(10), 'TN_3': np.int64(81), 'FP_3': np.int64(32), 'FN_3': np.int64(15), 'TP_4': np.int64(3), 'TN_4': np.int64(99), 'FP_4': np.int64(7), 'FN_4': np.int64(29), 'TP_5': np.int64(1), 'TN_5': np.int64(132), 'FP_5': np.int64(3), 'FN_5': np.int64(2)}
[2025-05-26 16:34:30,918][__main__][INFO] - Inference results saved to Llama-3.1-8B-llama31_classification_lora-C5_inference_results.jsonl
[2025-05-26 16:34:30,919][__main__][INFO] - Inference results: {'accuracy': 0.2753623188405797, 'RMSE': 61.10100926607787, 'QWK': 0.4094726770146714, 'HDIV': 0.09420289855072461, 'Macro_F1': 0.2742797540739807, 'Micro_F1': 0.2753623188405797, 'Weighted_F1': 0.2624605552578044, 'TP_0': np.int64(9), 'TN_0': np.int64(101), 'FP_0': np.int64(15), 'FN_0': np.int64(13), 'TP_1': np.int64(8), 'TN_1': np.int64(84), 'FP_1': np.int64(22), 'FN_1': np.int64(24), 'TP_2': np.int64(7), 'TN_2': np.int64(93), 'FP_2': np.int64(21), 'FN_2': np.int64(17), 'TP_3': np.int64(10), 'TN_3': np.int64(81), 'FP_3': np.int64(32), 'FN_3': np.int64(15), 'TP_4': np.int64(3), 'TN_4': np.int64(99), 'FP_4': np.int64(7), 'FN_4': np.int64(29), 'TP_5': np.int64(1), 'TN_5': np.int64(132), 'FP_5': np.int64(3), 'FN_5': np.int64(2)}
[2025-05-26 16:34:30,919][__main__][INFO] - Inference experiment completed
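Since every run directory added in this commit carries the same single-row evaluation_results.csv, the per-competência results are easy to collect into one table. A small aggregation sketch (paths follow the runs/ layout of this commit; pandas assumed available):

import glob
import pandas as pd

# One CSV per experiment directory, e.g. .../Llama-3.1-8B-llama31_classification_lora-C4/
paths = sorted(glob.glob("runs/slm_decoder_models/llama-3.1-8b/*/evaluation_results.csv"))
summary = pd.concat((pd.read_csv(p) for p in paths), ignore_index=True)

# The id column identifies the run; keep the headline metrics side by side.
print(summary[["id", "accuracy", "QWK", "Macro_F1", "Weighted_F1"]])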