abarbosa committed
Commit 010ff83 · 1 Parent(s): 2bf2981

add phi4 models

Files changed (30)
  1. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C1/.hydra/config.yaml +37 -0
  2. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C1/.hydra/hydra.yaml +155 -0
  3. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C1/.hydra/overrides.yaml +1 -0
  4. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C1/evaluation_results.csv +2 -0
  5. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C1/phi-4-phi4_classification_lora-C1_inference_results.jsonl +0 -0
  6. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C1/run_inference_experiment.log +124 -0
  7. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C2/.hydra/config.yaml +37 -0
  8. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C2/.hydra/hydra.yaml +155 -0
  9. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C2/.hydra/overrides.yaml +1 -0
  10. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C2/evaluation_results.csv +2 -0
  11. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C2/phi-4-phi4_classification_lora-C2_inference_results.jsonl +0 -0
  12. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C2/run_inference_experiment.log +124 -0
  13. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C3/.hydra/config.yaml +37 -0
  14. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C3/.hydra/hydra.yaml +155 -0
  15. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C3/.hydra/overrides.yaml +1 -0
  16. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C3/evaluation_results.csv +2 -0
  17. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C3/phi-4-phi4_classification_lora-C3_inference_results.jsonl +0 -0
  18. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C3/run_inference_experiment.log +124 -0
  19. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C4/.hydra/config.yaml +37 -0
  20. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C4/.hydra/hydra.yaml +155 -0
  21. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C4/.hydra/overrides.yaml +1 -0
  22. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C4/evaluation_results.csv +2 -0
  23. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C4/phi-4-phi4_classification_lora-C4_inference_results.jsonl +0 -0
  24. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C4/run_inference_experiment.log +124 -0
  25. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C5/.hydra/config.yaml +37 -0
  26. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C5/.hydra/hydra.yaml +155 -0
  27. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C5/.hydra/overrides.yaml +1 -0
  28. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C5/evaluation_results.csv +2 -0
  29. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C5/phi-4-phi4_classification_lora-C5_inference_results.jsonl +0 -0
  30. runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C5/run_inference_experiment.log +124 -0
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C1/.hydra/config.yaml ADDED
@@ -0,0 +1,37 @@
+ cache_dir: /tmp/
+ dataset:
+ name: kamel-usp/aes_enem_dataset
+ split: JBCS2025
+ training_params:
+ seed: 42
+ num_train_epochs: 20
+ logging_steps: 100
+ metric_for_best_model: QWK
+ bf16: true
+ post_training_results:
+ model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
+ experiments:
+ model:
+ name: microsoft/phi-4
+ type: phi4_classification_lora
+ num_labels: 6
+ output_dir: ./results/phi4-balanced/C1
+ logging_dir: ./logs/phi4-balanced/C1
+ best_model_dir: ./results/phi4-balanced/C1/best_model
+ lora_r: 8
+ lora_dropout: 0.05
+ lora_alpha: 16
+ lora_target_modules: all-linear
+ checkpoint_path: kamel-usp/jbcs2025_phi4-balanced-C1
+ tokenizer:
+ name: microsoft/phi-4
+ dataset:
+ grade_index: 0
+ training_params:
+ weight_decay: 0.01
+ warmup_ratio: 0.1
+ learning_rate: 5.0e-05
+ train_batch_size: 4
+ eval_batch_size: 4
+ gradient_accumulation_steps: 4
+ gradient_checkpointing: false
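The config above pairs the microsoft/phi-4 base model with the LoRA adapter published at kamel-usp/jbcs2025_phi4-balanced-C1 and six grade labels. Below is a minimal, hypothetical sketch of loading that checkpoint for inference; it is illustrative only and assumes the adapter loads with the stock transformers/peft APIs, rather than being the repository's own run_inference_experiment code.

```python
# Illustrative sketch only (not the repository's code): load the C1 LoRA
# adapter on top of the phi-4 base model for grade classification.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel

BASE = "microsoft/phi-4"
ADAPTER = "kamel-usp/jbcs2025_phi4-balanced-C1"  # checkpoint_path from the config above

tokenizer = AutoTokenizer.from_pretrained(BASE)
base_model = AutoModelForSequenceClassification.from_pretrained(
    BASE, num_labels=6, torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(base_model, ADAPTER).eval()

inputs = tokenizer("Texto do ensaio...", return_tensors="pt").to(model.device)
with torch.no_grad():
    grade_class = model(**inputs).logits.argmax(dim=-1).item()  # class 0-5, i.e. 0-200 in steps of 40
```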
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C1/.hydra/hydra.yaml ADDED
@@ -0,0 +1,155 @@
+ hydra:
+ run:
+ dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ sweep:
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][HYDRA] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: simple
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ loggers:
+ logging_example:
+ level: DEBUG
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: simple
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: RUN
+ searchpath: []
+ callbacks: {}
+ output_subdir: .hydra
+ overrides:
+ hydra:
+ - hydra.mode=RUN
+ task: []
+ job:
+ name: run_inference_experiment
+ chdir: null
+ override_dirname: ''
+ id: ???
+ num: ???
+ config_name: config
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.1'
+ cwd: /workspace/jbcs2025
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /workspace/jbcs2025/configs
+ schema: file
+ provider: main
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /workspace/jbcs2025/outputs/2025-05-26/15-16-56
+ choices:
+ experiments: slm_decoder_models/C1
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: default
+ hydra/hydra_logging: default
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C1/.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
+ []
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C1/evaluation_results.csv ADDED
@@ -0,0 +1,2 @@
+ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
+ 0.6014492753623188,27.662562992324986,0.6652455160246986,0.007246376811594235,0.4580067731011127,0.6014492753623188,0.6013651374602975,0,137,0,1,0,138,0,0,8,114,14,2,31,63,9,35,37,64,23,14,7,119,9,3,2025-05-26 15:16:56,phi-4-phi4_classification_lora-C1
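The per-class TP/FP/FN columns above are enough to reproduce the reported Macro_F1. A small sketch follows; one assumption worth flagging is that classes that never appear in either the labels or the predictions are left out of the average, which is what matches the 0.4580 reported for this run.

```python
# Sketch: recompute per-class F1 and Macro_F1 from evaluation_results.csv.
# Assumes classes with TP + FP + FN == 0 are excluded from the macro average.
import csv

def macro_f1(row: dict) -> float:
    scores = []
    for c in range(6):
        tp, fp, fn = (int(row[f"{k}_{c}"]) for k in ("TP", "FP", "FN"))
        if tp + fp + fn == 0:   # class absent from labels and predictions
            continue
        scores.append(2 * tp / (2 * tp + fp + fn))
    return sum(scores) / len(scores)

with open("evaluation_results.csv") as fh:
    print(round(macro_f1(next(csv.DictReader(fh))), 6))  # ~0.458007 for C1
```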
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C1/phi-4-phi4_classification_lora-C1_inference_results.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C1/run_inference_experiment.log ADDED
@@ -0,0 +1,124 @@
1
+ [2025-05-26 15:16:56,821][__main__][INFO] - Starting inference experiment
2
+ [2025-05-26 15:16:56,823][__main__][INFO] - cache_dir: /tmp/
3
+ dataset:
4
+ name: kamel-usp/aes_enem_dataset
5
+ split: JBCS2025
6
+ training_params:
7
+ seed: 42
8
+ num_train_epochs: 20
9
+ logging_steps: 100
10
+ metric_for_best_model: QWK
11
+ bf16: true
12
+ post_training_results:
13
+ model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
14
+ experiments:
15
+ model:
16
+ name: microsoft/phi-4
17
+ type: phi4_classification_lora
18
+ num_labels: 6
19
+ output_dir: ./results/phi4-balanced/C1
20
+ logging_dir: ./logs/phi4-balanced/C1
21
+ best_model_dir: ./results/phi4-balanced/C1/best_model
22
+ lora_r: 8
23
+ lora_dropout: 0.05
24
+ lora_alpha: 16
25
+ lora_target_modules: all-linear
26
+ checkpoint_path: kamel-usp/jbcs2025_phi4-balanced-C1
27
+ tokenizer:
28
+ name: microsoft/phi-4
29
+ dataset:
30
+ grade_index: 0
31
+ training_params:
32
+ weight_decay: 0.01
33
+ warmup_ratio: 0.1
34
+ learning_rate: 5.0e-05
35
+ train_batch_size: 4
36
+ eval_batch_size: 4
37
+ gradient_accumulation_steps: 4
38
+ gradient_checkpointing: false
39
+
40
+ [2025-05-26 15:16:56,826][__main__][INFO] - Running inference with fine-tuned HF model
41
+ [2025-05-26 15:17:10,431][transformers.tokenization_utils_base][INFO] - loading file vocab.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/vocab.json
42
+ [2025-05-26 15:17:10,432][transformers.tokenization_utils_base][INFO] - loading file merges.txt from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/merges.txt
43
+ [2025-05-26 15:17:10,432][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/tokenizer.json
44
+ [2025-05-26 15:17:10,432][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/added_tokens.json
45
+ [2025-05-26 15:17:10,432][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/special_tokens_map.json
46
+ [2025-05-26 15:17:10,433][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/tokenizer_config.json
47
+ [2025-05-26 15:17:10,433][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
48
+ [2025-05-26 15:17:10,825][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False
49
+ [2025-05-26 15:17:12,494][__main__][INFO] - Loading model from: microsoft/phi-4
50
+ [2025-05-26 15:17:13,247][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/config.json
51
+ [2025-05-26 15:17:13,251][transformers.configuration_utils][INFO] - Model config Phi3Config {
52
+ "architectures": [
53
+ "Phi3ForCausalLM"
54
+ ],
55
+ "attention_bias": false,
56
+ "attention_dropout": 0.0,
57
+ "bos_token_id": 100257,
58
+ "embd_pdrop": 0.0,
59
+ "eos_token_id": 100265,
60
+ "hidden_act": "silu",
61
+ "hidden_size": 5120,
62
+ "id2label": {
63
+ "0": 0,
64
+ "1": 40,
65
+ "2": 80,
66
+ "3": 120,
67
+ "4": 160,
68
+ "5": 200
69
+ },
70
+ "initializer_range": 0.02,
71
+ "intermediate_size": 17920,
72
+ "label2id": {
73
+ "0": 0,
74
+ "40": 1,
75
+ "80": 2,
76
+ "120": 3,
77
+ "160": 4,
78
+ "200": 5
79
+ },
80
+ "max_position_embeddings": 16384,
81
+ "model_type": "phi3",
82
+ "num_attention_heads": 40,
83
+ "num_hidden_layers": 40,
84
+ "num_key_value_heads": 10,
85
+ "original_max_position_embeddings": 16384,
86
+ "pad_token_id": 100349,
87
+ "partial_rotary_factor": 1.0,
88
+ "resid_pdrop": 0.0,
89
+ "rms_norm_eps": 1e-05,
90
+ "rope_scaling": null,
91
+ "rope_theta": 250000,
92
+ "sliding_window": null,
93
+ "tie_word_embeddings": false,
94
+ "torch_dtype": "bfloat16",
95
+ "transformers_version": "4.52.3",
96
+ "use_cache": true,
97
+ "vocab_size": 100352
98
+ }
99
+
100
+ [2025-05-26 15:17:14,641][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/model.safetensors.index.json
101
+ [2025-05-26 15:22:17,545][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object
102
+ [2025-05-26 15:22:17,546][transformers.modeling_utils][INFO] - Instantiating Phi3ForSequenceClassification model under default dtype torch.bfloat16.
103
+ [2025-05-26 15:22:26,719][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at microsoft/phi-4 were not used when initializing Phi3ForSequenceClassification: ['lm_head.weight']
104
+ - This IS expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
105
+ - This IS NOT expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
106
+ [2025-05-26 15:22:26,719][transformers.modeling_utils][WARNING] - Some weights of Phi3ForSequenceClassification were not initialized from the model checkpoint at microsoft/phi-4 and are newly initialized: ['score.weight']
107
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
108
+ [2025-05-26 15:22:34,510][__main__][INFO] - Loaded pre-trained PEFT model from kamel-usp/jbcs2025_phi4-balanced-C1
109
+ [2025-05-26 15:22:34,515][__main__][INFO] - None
110
+ [2025-05-26 15:22:34,519][transformers.training_args][INFO] - PyTorch: setting up devices
111
+ [2025-05-26 15:22:34,579][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
112
+ [2025-05-26 15:22:34,599][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
113
+ [2025-05-26 15:22:34,748][transformers.trainer][INFO] - Using auto half precision backend
114
+ [2025-05-26 15:22:34,749][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
115
+ [2025-05-26 15:22:34,749][__main__][INFO] - Running inference on test dataset
116
+ [2025-05-26 15:22:34,751][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, essay_text, grades, prompt, reference, supporting_text, id, id_prompt. If essay_year, essay_text, grades, prompt, reference, supporting_text, id, id_prompt are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
117
+ [2025-05-26 15:22:34,783][transformers.trainer][INFO] -
118
+ ***** Running Prediction *****
119
+ [2025-05-26 15:22:34,783][transformers.trainer][INFO] - Num examples = 138
120
+ [2025-05-26 15:22:34,783][transformers.trainer][INFO] - Batch size = 4
121
+ [2025-05-26 15:24:20,731][transformers][INFO] - {'accuracy': 0.6014492753623188, 'RMSE': 27.662562992324986, 'QWK': 0.6652455160246986, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.4580067731011127, 'Micro_F1': 0.6014492753623188, 'Weighted_F1': 0.6013651374602975, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(8), 'TN_2': np.int64(114), 'FP_2': np.int64(14), 'FN_2': np.int64(2), 'TP_3': np.int64(31), 'TN_3': np.int64(63), 'FP_3': np.int64(9), 'FN_3': np.int64(35), 'TP_4': np.int64(37), 'TN_4': np.int64(64), 'FP_4': np.int64(23), 'FN_4': np.int64(14), 'TP_5': np.int64(7), 'TN_5': np.int64(119), 'FP_5': np.int64(9), 'FN_5': np.int64(3)}
122
+ [2025-05-26 15:24:20,753][__main__][INFO] - Inference results saved to phi-4-phi4_classification_lora-C1_inference_results.jsonl
123
+ [2025-05-26 15:24:20,754][__main__][INFO] - Inference results: {'accuracy': 0.6014492753623188, 'RMSE': 27.662562992324986, 'QWK': 0.6652455160246986, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.4580067731011127, 'Micro_F1': 0.6014492753623188, 'Weighted_F1': 0.6013651374602975, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(8), 'TN_2': np.int64(114), 'FP_2': np.int64(14), 'FN_2': np.int64(2), 'TP_3': np.int64(31), 'TN_3': np.int64(63), 'FP_3': np.int64(9), 'FN_3': np.int64(35), 'TP_4': np.int64(37), 'TN_4': np.int64(64), 'FP_4': np.int64(23), 'FN_4': np.int64(14), 'TP_5': np.int64(7), 'TN_5': np.int64(119), 'FP_5': np.int64(9), 'FN_5': np.int64(3)}
124
+ [2025-05-26 15:24:20,754][__main__][INFO] - Inference experiment completed
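The QWK values logged above are quadratic weighted kappa. A hypothetical sketch of recomputing it from the saved inference results is below; the jsonl field names (`label`, `prediction`) are assumptions, since that file's schema is not rendered in this diff.

```python
# Hypothetical sketch: recompute QWK from the saved inference results.
# The "label" / "prediction" field names are assumed, not taken from the repo.
import json
from sklearn.metrics import cohen_kappa_score

y_true, y_pred = [], []
with open("phi-4-phi4_classification_lora-C1_inference_results.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        y_true.append(record["label"])
        y_pred.append(record["prediction"])

print(cohen_kappa_score(y_true, y_pred, weights="quadratic"))
```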
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C2/.hydra/config.yaml ADDED
@@ -0,0 +1,37 @@
+ cache_dir: /tmp/
+ dataset:
+ name: kamel-usp/aes_enem_dataset
+ split: JBCS2025
+ training_params:
+ seed: 42
+ num_train_epochs: 20
+ logging_steps: 100
+ metric_for_best_model: QWK
+ bf16: true
+ post_training_results:
+ model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
+ experiments:
+ model:
+ name: microsoft/phi-4
+ type: phi4_classification_lora
+ num_labels: 6
+ output_dir: ./results/phi4-balanced/C2
+ logging_dir: ./logs/phi4-balanced/C2
+ best_model_dir: ./results/phi4-balanced/C2/best_model
+ lora_r: 8
+ lora_dropout: 0.05
+ lora_alpha: 16
+ lora_target_modules: all-linear
+ checkpoint_path: kamel-usp/jbcs2025_phi4-balanced-C2
+ tokenizer:
+ name: microsoft/phi-4
+ dataset:
+ grade_index: 1
+ training_params:
+ weight_decay: 0.01
+ warmup_ratio: 0.1
+ learning_rate: 5.0e-05
+ train_batch_size: 1
+ eval_batch_size: 4
+ gradient_accumulation_steps: 16
+ gradient_checkpointing: false
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C2/.hydra/hydra.yaml ADDED
@@ -0,0 +1,155 @@
+ hydra:
+ run:
+ dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ sweep:
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][HYDRA] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: simple
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ loggers:
+ logging_example:
+ level: DEBUG
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: simple
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: RUN
+ searchpath: []
+ callbacks: {}
+ output_subdir: .hydra
+ overrides:
+ hydra:
+ - hydra.mode=RUN
+ task: []
+ job:
+ name: run_inference_experiment
+ chdir: null
+ override_dirname: ''
+ id: ???
+ num: ???
+ config_name: config
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.1'
+ cwd: /workspace/jbcs2025
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /workspace/jbcs2025/configs
+ schema: file
+ provider: main
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /workspace/jbcs2025/outputs/2025-05-26/15-50-03
+ choices:
+ experiments: slm_decoder_models/C2
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: default
+ hydra/hydra_logging: default
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C2/.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
+ []
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C2/evaluation_results.csv ADDED
@@ -0,0 +1,2 @@
+ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
+ 0.47101449275362317,61.2904702146299,0.39747736093143593,0.08695652173913049,0.28940811083093393,0.47101449275362317,0.44952547422123834,0,137,0,1,21,78,25,14,0,133,0,5,31,60,27,20,6,106,6,20,7,103,15,13,2025-05-26 15:50:03,phi-4-phi4_classification_lora-C2
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C2/phi-4-phi4_classification_lora-C2_inference_results.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C2/run_inference_experiment.log ADDED
@@ -0,0 +1,124 @@
1
+ [2025-05-26 15:50:03,858][__main__][INFO] - Starting inference experiment
2
+ [2025-05-26 15:50:03,860][__main__][INFO] - cache_dir: /tmp/
3
+ dataset:
4
+ name: kamel-usp/aes_enem_dataset
5
+ split: JBCS2025
6
+ training_params:
7
+ seed: 42
8
+ num_train_epochs: 20
9
+ logging_steps: 100
10
+ metric_for_best_model: QWK
11
+ bf16: true
12
+ post_training_results:
13
+ model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
14
+ experiments:
15
+ model:
16
+ name: microsoft/phi-4
17
+ type: phi4_classification_lora
18
+ num_labels: 6
19
+ output_dir: ./results/phi4-balanced/C2
20
+ logging_dir: ./logs/phi4-balanced/C2
21
+ best_model_dir: ./results/phi4-balanced/C2/best_model
22
+ lora_r: 8
23
+ lora_dropout: 0.05
24
+ lora_alpha: 16
25
+ lora_target_modules: all-linear
26
+ checkpoint_path: kamel-usp/jbcs2025_phi4-balanced-C2
27
+ tokenizer:
28
+ name: microsoft/phi-4
29
+ dataset:
30
+ grade_index: 1
31
+ training_params:
32
+ weight_decay: 0.01
33
+ warmup_ratio: 0.1
34
+ learning_rate: 5.0e-05
35
+ train_batch_size: 1
36
+ eval_batch_size: 4
37
+ gradient_accumulation_steps: 16
38
+ gradient_checkpointing: false
39
+
40
+ [2025-05-26 15:50:03,863][__main__][INFO] - Running inference with fine-tuned HF model
41
+ [2025-05-26 15:50:11,781][transformers.tokenization_utils_base][INFO] - loading file vocab.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/vocab.json
42
+ [2025-05-26 15:50:11,782][transformers.tokenization_utils_base][INFO] - loading file merges.txt from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/merges.txt
43
+ [2025-05-26 15:50:11,782][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/tokenizer.json
44
+ [2025-05-26 15:50:11,782][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/added_tokens.json
45
+ [2025-05-26 15:50:11,782][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/special_tokens_map.json
46
+ [2025-05-26 15:50:11,782][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/tokenizer_config.json
47
+ [2025-05-26 15:50:11,783][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
48
+ [2025-05-26 15:50:12,042][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False
49
+ [2025-05-26 15:50:12,617][__main__][INFO] - Loading model from: microsoft/phi-4
50
+ [2025-05-26 15:50:12,863][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/config.json
51
+ [2025-05-26 15:50:12,866][transformers.configuration_utils][INFO] - Model config Phi3Config {
52
+ "architectures": [
53
+ "Phi3ForCausalLM"
54
+ ],
55
+ "attention_bias": false,
56
+ "attention_dropout": 0.0,
57
+ "bos_token_id": 100257,
58
+ "embd_pdrop": 0.0,
59
+ "eos_token_id": 100265,
60
+ "hidden_act": "silu",
61
+ "hidden_size": 5120,
62
+ "id2label": {
63
+ "0": 0,
64
+ "1": 40,
65
+ "2": 80,
66
+ "3": 120,
67
+ "4": 160,
68
+ "5": 200
69
+ },
70
+ "initializer_range": 0.02,
71
+ "intermediate_size": 17920,
72
+ "label2id": {
73
+ "0": 0,
74
+ "40": 1,
75
+ "80": 2,
76
+ "120": 3,
77
+ "160": 4,
78
+ "200": 5
79
+ },
80
+ "max_position_embeddings": 16384,
81
+ "model_type": "phi3",
82
+ "num_attention_heads": 40,
83
+ "num_hidden_layers": 40,
84
+ "num_key_value_heads": 10,
85
+ "original_max_position_embeddings": 16384,
86
+ "pad_token_id": 100349,
87
+ "partial_rotary_factor": 1.0,
88
+ "resid_pdrop": 0.0,
89
+ "rms_norm_eps": 1e-05,
90
+ "rope_scaling": null,
91
+ "rope_theta": 250000,
92
+ "sliding_window": null,
93
+ "tie_word_embeddings": false,
94
+ "torch_dtype": "bfloat16",
95
+ "transformers_version": "4.52.3",
96
+ "use_cache": true,
97
+ "vocab_size": 100352
98
+ }
99
+
100
+ [2025-05-26 15:50:13,103][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/model.safetensors.index.json
101
+ [2025-05-26 15:50:13,104][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object
102
+ [2025-05-26 15:50:13,104][transformers.modeling_utils][INFO] - Instantiating Phi3ForSequenceClassification model under default dtype torch.bfloat16.
103
+ [2025-05-26 15:50:21,981][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at microsoft/phi-4 were not used when initializing Phi3ForSequenceClassification: ['lm_head.weight']
104
+ - This IS expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
105
+ - This IS NOT expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
106
+ [2025-05-26 15:50:21,981][transformers.modeling_utils][WARNING] - Some weights of Phi3ForSequenceClassification were not initialized from the model checkpoint at microsoft/phi-4 and are newly initialized: ['score.weight']
107
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
108
+ [2025-05-26 15:50:24,590][__main__][INFO] - Loaded pre-trained PEFT model from kamel-usp/jbcs2025_phi4-balanced-C2
109
+ [2025-05-26 15:50:24,595][__main__][INFO] - None
110
+ [2025-05-26 15:50:24,598][transformers.training_args][INFO] - PyTorch: setting up devices
111
+ [2025-05-26 15:50:24,649][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
112
+ [2025-05-26 15:50:24,665][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
113
+ [2025-05-26 15:50:24,700][transformers.trainer][INFO] - Using auto half precision backend
114
+ [2025-05-26 15:50:24,701][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
115
+ [2025-05-26 15:50:24,701][__main__][INFO] - Running inference on test dataset
116
+ [2025-05-26 15:50:24,702][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: reference, essay_year, id_prompt, supporting_text, prompt, id, essay_text, grades. If reference, essay_year, id_prompt, supporting_text, prompt, id, essay_text, grades are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
117
+ [2025-05-26 15:50:24,730][transformers.trainer][INFO] -
118
+ ***** Running Prediction *****
119
+ [2025-05-26 15:50:24,730][transformers.trainer][INFO] - Num examples = 138
120
+ [2025-05-26 15:50:24,730][transformers.trainer][INFO] - Batch size = 4
121
+ [2025-05-26 15:52:59,219][transformers][INFO] - {'accuracy': 0.47101449275362317, 'RMSE': 61.2904702146299, 'QWK': 0.39747736093143593, 'HDIV': 0.08695652173913049, 'Macro_F1': 0.28940811083093393, 'Micro_F1': 0.47101449275362317, 'Weighted_F1': 0.44952547422123834, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(21), 'TN_1': np.int64(78), 'FP_1': np.int64(25), 'FN_1': np.int64(14), 'TP_2': np.int64(0), 'TN_2': np.int64(133), 'FP_2': np.int64(0), 'FN_2': np.int64(5), 'TP_3': np.int64(31), 'TN_3': np.int64(60), 'FP_3': np.int64(27), 'FN_3': np.int64(20), 'TP_4': np.int64(6), 'TN_4': np.int64(106), 'FP_4': np.int64(6), 'FN_4': np.int64(20), 'TP_5': np.int64(7), 'TN_5': np.int64(103), 'FP_5': np.int64(15), 'FN_5': np.int64(13)}
122
+ [2025-05-26 15:52:59,240][__main__][INFO] - Inference results saved to phi-4-phi4_classification_lora-C2_inference_results.jsonl
123
+ [2025-05-26 15:52:59,241][__main__][INFO] - Inference results: {'accuracy': 0.47101449275362317, 'RMSE': 61.2904702146299, 'QWK': 0.39747736093143593, 'HDIV': 0.08695652173913049, 'Macro_F1': 0.28940811083093393, 'Micro_F1': 0.47101449275362317, 'Weighted_F1': 0.44952547422123834, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(21), 'TN_1': np.int64(78), 'FP_1': np.int64(25), 'FN_1': np.int64(14), 'TP_2': np.int64(0), 'TN_2': np.int64(133), 'FP_2': np.int64(0), 'FN_2': np.int64(5), 'TP_3': np.int64(31), 'TN_3': np.int64(60), 'FP_3': np.int64(27), 'FN_3': np.int64(20), 'TP_4': np.int64(6), 'TN_4': np.int64(106), 'FP_4': np.int64(6), 'FN_4': np.int64(20), 'TP_5': np.int64(7), 'TN_5': np.int64(103), 'FP_5': np.int64(15), 'FN_5': np.int64(13)}
124
+ [2025-05-26 15:52:59,241][__main__][INFO] - Inference experiment completed
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C3/.hydra/config.yaml ADDED
@@ -0,0 +1,37 @@
+ cache_dir: /tmp/
+ dataset:
+ name: kamel-usp/aes_enem_dataset
+ split: JBCS2025
+ training_params:
+ seed: 42
+ num_train_epochs: 20
+ logging_steps: 100
+ metric_for_best_model: QWK
+ bf16: true
+ post_training_results:
+ model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
+ experiments:
+ model:
+ name: microsoft/phi-4
+ type: phi4_classification_lora
+ num_labels: 6
+ output_dir: ./results/phi4-balanced/C3
+ logging_dir: ./logs/phi4-balanced/C3
+ best_model_dir: ./results/phi4-balanced/C3/best_model
+ lora_r: 8
+ lora_dropout: 0.05
+ lora_alpha: 16
+ lora_target_modules: all-linear
+ checkpoint_path: kamel-usp/jbcs2025_phi4-balanced-C3
+ tokenizer:
+ name: microsoft/phi-4
+ dataset:
+ grade_index: 2
+ training_params:
+ weight_decay: 0.01
+ warmup_ratio: 0.1
+ learning_rate: 5.0e-05
+ train_batch_size: 1
+ eval_batch_size: 2
+ gradient_accumulation_steps: 16
+ gradient_checkpointing: true
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C3/.hydra/hydra.yaml ADDED
@@ -0,0 +1,155 @@
+ hydra:
+ run:
+ dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ sweep:
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][HYDRA] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: simple
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ loggers:
+ logging_example:
+ level: DEBUG
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: simple
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: RUN
+ searchpath: []
+ callbacks: {}
+ output_subdir: .hydra
+ overrides:
+ hydra:
+ - hydra.mode=RUN
+ task: []
+ job:
+ name: run_inference_experiment
+ chdir: null
+ override_dirname: ''
+ id: ???
+ num: ???
+ config_name: config
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.1'
+ cwd: /workspace/jbcs2025
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /workspace/jbcs2025/configs
+ schema: file
+ provider: main
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /workspace/jbcs2025/outputs/2025-05-26/15-54-03
+ choices:
+ experiments: slm_decoder_models/C3
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: default
+ hydra/hydra_logging: default
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C3/.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
+ []
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C3/evaluation_results.csv ADDED
@@ -0,0 +1,2 @@
+ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
+ 0.3333333333333333,49.22589162013322,0.29381519024533553,0.036231884057971064,0.20859754797807895,0.3333333333333333,0.2747500757119841,0,137,0,1,0,109,0,29,14,86,34,4,24,49,44,21,7,93,7,31,1,124,7,6,2025-05-26 15:54:03,phi-4-phi4_classification_lora-C3
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C3/phi-4-phi4_classification_lora-C3_inference_results.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C3/run_inference_experiment.log ADDED
@@ -0,0 +1,124 @@
1
+ [2025-05-26 15:54:03,983][__main__][INFO] - Starting inference experiment
2
+ [2025-05-26 15:54:03,986][__main__][INFO] - cache_dir: /tmp/
3
+ dataset:
4
+ name: kamel-usp/aes_enem_dataset
5
+ split: JBCS2025
6
+ training_params:
7
+ seed: 42
8
+ num_train_epochs: 20
9
+ logging_steps: 100
10
+ metric_for_best_model: QWK
11
+ bf16: true
12
+ post_training_results:
13
+ model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
14
+ experiments:
15
+ model:
16
+ name: microsoft/phi-4
17
+ type: phi4_classification_lora
18
+ num_labels: 6
19
+ output_dir: ./results/phi4-balanced/C3
20
+ logging_dir: ./logs/phi4-balanced/C3
21
+ best_model_dir: ./results/phi4-balanced/C3/best_model
22
+ lora_r: 8
23
+ lora_dropout: 0.05
24
+ lora_alpha: 16
25
+ lora_target_modules: all-linear
26
+ checkpoint_path: kamel-usp/jbcs2025_phi4-balanced-C3
27
+ tokenizer:
28
+ name: microsoft/phi-4
29
+ dataset:
30
+ grade_index: 2
31
+ training_params:
32
+ weight_decay: 0.01
33
+ warmup_ratio: 0.1
34
+ learning_rate: 5.0e-05
35
+ train_batch_size: 1
36
+ eval_batch_size: 2
37
+ gradient_accumulation_steps: 16
38
+ gradient_checkpointing: true
39
+
40
+ [2025-05-26 15:54:03,989][__main__][INFO] - Running inference with fine-tuned HF model
41
+ [2025-05-26 15:54:10,518][transformers.tokenization_utils_base][INFO] - loading file vocab.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/vocab.json
42
+ [2025-05-26 15:54:10,518][transformers.tokenization_utils_base][INFO] - loading file merges.txt from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/merges.txt
43
+ [2025-05-26 15:54:10,518][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/tokenizer.json
44
+ [2025-05-26 15:54:10,518][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/added_tokens.json
45
+ [2025-05-26 15:54:10,518][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/special_tokens_map.json
46
+ [2025-05-26 15:54:10,519][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/tokenizer_config.json
47
+ [2025-05-26 15:54:10,519][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
48
+ [2025-05-26 15:54:10,885][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False
49
+ [2025-05-26 15:54:12,790][__main__][INFO] - Loading model from: microsoft/phi-4
50
+ [2025-05-26 15:54:13,140][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/config.json
51
+ [2025-05-26 15:54:13,143][transformers.configuration_utils][INFO] - Model config Phi3Config {
52
+ "architectures": [
53
+ "Phi3ForCausalLM"
54
+ ],
55
+ "attention_bias": false,
56
+ "attention_dropout": 0.0,
57
+ "bos_token_id": 100257,
58
+ "embd_pdrop": 0.0,
59
+ "eos_token_id": 100265,
60
+ "hidden_act": "silu",
61
+ "hidden_size": 5120,
62
+ "id2label": {
63
+ "0": 0,
64
+ "1": 40,
65
+ "2": 80,
66
+ "3": 120,
67
+ "4": 160,
68
+ "5": 200
69
+ },
70
+ "initializer_range": 0.02,
71
+ "intermediate_size": 17920,
72
+ "label2id": {
73
+ "0": 0,
74
+ "40": 1,
75
+ "80": 2,
76
+ "120": 3,
77
+ "160": 4,
78
+ "200": 5
79
+ },
80
+ "max_position_embeddings": 16384,
81
+ "model_type": "phi3",
82
+ "num_attention_heads": 40,
83
+ "num_hidden_layers": 40,
84
+ "num_key_value_heads": 10,
85
+ "original_max_position_embeddings": 16384,
86
+ "pad_token_id": 100349,
87
+ "partial_rotary_factor": 1.0,
88
+ "resid_pdrop": 0.0,
89
+ "rms_norm_eps": 1e-05,
90
+ "rope_scaling": null,
91
+ "rope_theta": 250000,
92
+ "sliding_window": null,
93
+ "tie_word_embeddings": false,
94
+ "torch_dtype": "bfloat16",
95
+ "transformers_version": "4.52.3",
96
+ "use_cache": true,
97
+ "vocab_size": 100352
98
+ }
99
+
100
+ [2025-05-26 15:54:13,382][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/model.safetensors.index.json
101
+ [2025-05-26 15:54:13,382][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object
102
+ [2025-05-26 15:54:13,383][transformers.modeling_utils][INFO] - Instantiating Phi3ForSequenceClassification model under default dtype torch.bfloat16.
103
+ [2025-05-26 15:54:22,204][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at microsoft/phi-4 were not used when initializing Phi3ForSequenceClassification: ['lm_head.weight']
104
+ - This IS expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
105
+ - This IS NOT expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
106
+ [2025-05-26 15:54:22,205][transformers.modeling_utils][WARNING] - Some weights of Phi3ForSequenceClassification were not initialized from the model checkpoint at microsoft/phi-4 and are newly initialized: ['score.weight']
107
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
108
+ [2025-05-26 15:54:29,389][__main__][INFO] - Loaded pre-trained PEFT model from kamel-usp/jbcs2025_phi4-balanced-C3
109
+ [2025-05-26 15:54:29,395][__main__][INFO] - None
110
+ [2025-05-26 15:54:29,400][transformers.training_args][INFO] - PyTorch: setting up devices
111
+ [2025-05-26 15:54:29,475][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
112
+ [2025-05-26 15:54:29,495][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
113
+ [2025-05-26 15:54:29,534][transformers.trainer][INFO] - Using auto half precision backend
114
+ [2025-05-26 15:54:29,535][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
115
+ [2025-05-26 15:54:29,536][__main__][INFO] - Running inference on test dataset
116
+ [2025-05-26 15:54:29,537][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: grades, reference, essay_year, id, prompt, essay_text, id_prompt, supporting_text. If grades, reference, essay_year, id, prompt, essay_text, id_prompt, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
117
+ [2025-05-26 15:54:29,571][transformers.trainer][INFO] -
118
+ ***** Running Prediction *****
119
+ [2025-05-26 15:54:29,571][transformers.trainer][INFO] - Num examples = 138
120
+ [2025-05-26 15:54:29,571][transformers.trainer][INFO] - Batch size = 2
121
+ [2025-05-26 15:56:31,338][transformers][INFO] - {'accuracy': 0.3333333333333333, 'RMSE': 49.22589162013322, 'QWK': 0.29381519024533553, 'HDIV': 0.036231884057971064, 'Macro_F1': 0.20859754797807895, 'Micro_F1': 0.3333333333333333, 'Weighted_F1': 0.2747500757119841, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(109), 'FP_1': np.int64(0), 'FN_1': np.int64(29), 'TP_2': np.int64(14), 'TN_2': np.int64(86), 'FP_2': np.int64(34), 'FN_2': np.int64(4), 'TP_3': np.int64(24), 'TN_3': np.int64(49), 'FP_3': np.int64(44), 'FN_3': np.int64(21), 'TP_4': np.int64(7), 'TN_4': np.int64(93), 'FP_4': np.int64(7), 'FN_4': np.int64(31), 'TP_5': np.int64(1), 'TN_5': np.int64(124), 'FP_5': np.int64(7), 'FN_5': np.int64(6)}
122
+ [2025-05-26 15:56:31,364][__main__][INFO] - Inference results saved to phi-4-phi4_classification_lora-C3_inference_results.jsonl
123
+ [2025-05-26 15:56:31,364][__main__][INFO] - Inference results: {'accuracy': 0.3333333333333333, 'RMSE': 49.22589162013322, 'QWK': 0.29381519024533553, 'HDIV': 0.036231884057971064, 'Macro_F1': 0.20859754797807895, 'Micro_F1': 0.3333333333333333, 'Weighted_F1': 0.2747500757119841, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(109), 'FP_1': np.int64(0), 'FN_1': np.int64(29), 'TP_2': np.int64(14), 'TN_2': np.int64(86), 'FP_2': np.int64(34), 'FN_2': np.int64(4), 'TP_3': np.int64(24), 'TN_3': np.int64(49), 'FP_3': np.int64(44), 'FN_3': np.int64(21), 'TP_4': np.int64(7), 'TN_4': np.int64(93), 'FP_4': np.int64(7), 'FN_4': np.int64(31), 'TP_5': np.int64(1), 'TN_5': np.int64(124), 'FP_5': np.int64(7), 'FN_5': np.int64(6)}
124
+ [2025-05-26 15:56:31,365][__main__][INFO] - Inference experiment completed
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C4/.hydra/config.yaml ADDED
@@ -0,0 +1,37 @@
+ cache_dir: /tmp/
+ dataset:
+ name: kamel-usp/aes_enem_dataset
+ split: JBCS2025
+ training_params:
+ seed: 42
+ num_train_epochs: 20
+ logging_steps: 100
+ metric_for_best_model: QWK
+ bf16: true
+ post_training_results:
+ model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
+ experiments:
+ model:
+ name: microsoft/phi-4
+ type: phi4_classification_lora
+ num_labels: 6
+ output_dir: ./results/phi4-balanced/C4
+ logging_dir: ./logs/phi4-balanced/C4
+ best_model_dir: ./results/phi4-balanced/C4/best_model
+ lora_r: 8
+ lora_dropout: 0.05
+ lora_alpha: 16
+ lora_target_modules: all-linear
+ checkpoint_path: kamel-usp/jbcs2025_phi4-balanced-C4
+ tokenizer:
+ name: microsoft/phi-4
+ dataset:
+ grade_index: 3
+ training_params:
+ weight_decay: 0.01
+ warmup_ratio: 0.1
+ learning_rate: 5.0e-05
+ train_batch_size: 2
+ eval_batch_size: 4
+ gradient_accumulation_steps: 8
+ gradient_checkpointing: false
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C4/.hydra/hydra.yaml ADDED
@@ -0,0 +1,155 @@
+ hydra:
+ run:
+ dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ sweep:
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][HYDRA] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: simple
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ loggers:
+ logging_example:
+ level: DEBUG
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: simple
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: RUN
+ searchpath: []
+ callbacks: {}
+ output_subdir: .hydra
+ overrides:
+ hydra:
+ - hydra.mode=RUN
+ task: []
+ job:
+ name: run_inference_experiment
+ chdir: null
+ override_dirname: ''
+ id: ???
+ num: ???
+ config_name: config
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.1'
+ cwd: /workspace/jbcs2025
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /workspace/jbcs2025/configs
+ schema: file
+ provider: main
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /workspace/jbcs2025/outputs/2025-05-26/15-57-10
+ choices:
+ experiments: slm_decoder_models/C4
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: default
+ hydra/hydra_logging: default
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C4/.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
+ []
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C4/evaluation_results.csv ADDED
@@ -0,0 +1,2 @@
1
+ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
2
+ 0.7028985507246377,24.55399256179405,0.579465541490858,0.007246376811594235,0.27468966776195697,0.7028985507246377,0.6761155293109196,0,137,0,1,0,135,2,1,1,128,1,8,67,39,23,9,29,79,13,17,0,131,2,5,2025-05-26 15:57:10,phi-4-phi4_classification_lora-C4
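For reference, the TP_k/TN_k/FP_k/FN_k columns in evaluation_results.csv are per-class confusion counts for the six grade labels. A small sketch of turning them into per-class precision and recall (the file path is illustrative):

import pandas as pd

df = pd.read_csv("evaluation_results.csv")
row = df.iloc[0]

for k in range(6):  # classes 0..5 correspond to grades 0, 40, ..., 200
    tp, fp, fn = row[f"TP_{k}"], row[f"FP_{k}"], row[f"FN_{k}"]
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    print(f"class {k}: precision={precision:.3f} recall={recall:.3f}")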
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C4/phi-4-phi4_classification_lora-C4_inference_results.jsonl ADDED
The diff for this file is too large to render. See raw diff
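The per-essay inference_results.jsonl is too large for the diff view. If needed, it can be inspected line by line as JSONL; the sketch below makes no assumption about its field names, since the schema is not shown here.

import json

path = "phi-4-phi4_classification_lora-C4_inference_results.jsonl"  # file named above
with open(path) as f:
    records = [json.loads(line) for line in f]

print(len(records), "records")
print(sorted(records[0].keys()))  # inspect the actual schema instead of assuming it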
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C4/run_inference_experiment.log ADDED
@@ -0,0 +1,124 @@
1
+ [2025-05-26 15:57:10,593][__main__][INFO] - Starting inference experiment
2
+ [2025-05-26 15:57:10,595][__main__][INFO] - cache_dir: /tmp/
3
+ dataset:
4
+ name: kamel-usp/aes_enem_dataset
5
+ split: JBCS2025
6
+ training_params:
7
+ seed: 42
8
+ num_train_epochs: 20
9
+ logging_steps: 100
10
+ metric_for_best_model: QWK
11
+ bf16: true
12
+ post_training_results:
13
+ model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
14
+ experiments:
15
+ model:
16
+ name: microsoft/phi-4
17
+ type: phi4_classification_lora
18
+ num_labels: 6
19
+ output_dir: ./results/phi4-balanced/C4
20
+ logging_dir: ./logs/phi4-balanced/C4
21
+ best_model_dir: ./results/phi4-balanced/C4/best_model
22
+ lora_r: 8
23
+ lora_dropout: 0.05
24
+ lora_alpha: 16
25
+ lora_target_modules: all-linear
26
+ checkpoint_path: kamel-usp/jbcs2025_phi4-balanced-C4
27
+ tokenizer:
28
+ name: microsoft/phi-4
29
+ dataset:
30
+ grade_index: 3
31
+ training_params:
32
+ weight_decay: 0.01
33
+ warmup_ratio: 0.1
34
+ learning_rate: 5.0e-05
35
+ train_batch_size: 2
36
+ eval_batch_size: 4
37
+ gradient_accumulation_steps: 8
38
+ gradient_checkpointing: false
39
+
40
+ [2025-05-26 15:57:10,598][__main__][INFO] - Running inference with fine-tuned HF model
41
+ [2025-05-26 15:57:16,626][transformers.tokenization_utils_base][INFO] - loading file vocab.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/vocab.json
42
+ [2025-05-26 15:57:16,627][transformers.tokenization_utils_base][INFO] - loading file merges.txt from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/merges.txt
43
+ [2025-05-26 15:57:16,627][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/tokenizer.json
44
+ [2025-05-26 15:57:16,627][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/added_tokens.json
45
+ [2025-05-26 15:57:16,627][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/special_tokens_map.json
46
+ [2025-05-26 15:57:16,627][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/tokenizer_config.json
47
+ [2025-05-26 15:57:16,627][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
48
+ [2025-05-26 15:57:17,010][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False
49
+ [2025-05-26 15:57:18,888][__main__][INFO] - Loading model from: microsoft/phi-4
50
+ [2025-05-26 15:57:19,197][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/config.json
51
+ [2025-05-26 15:57:19,201][transformers.configuration_utils][INFO] - Model config Phi3Config {
52
+ "architectures": [
53
+ "Phi3ForCausalLM"
54
+ ],
55
+ "attention_bias": false,
56
+ "attention_dropout": 0.0,
57
+ "bos_token_id": 100257,
58
+ "embd_pdrop": 0.0,
59
+ "eos_token_id": 100265,
60
+ "hidden_act": "silu",
61
+ "hidden_size": 5120,
62
+ "id2label": {
63
+ "0": 0,
64
+ "1": 40,
65
+ "2": 80,
66
+ "3": 120,
67
+ "4": 160,
68
+ "5": 200
69
+ },
70
+ "initializer_range": 0.02,
71
+ "intermediate_size": 17920,
72
+ "label2id": {
73
+ "0": 0,
74
+ "40": 1,
75
+ "80": 2,
76
+ "120": 3,
77
+ "160": 4,
78
+ "200": 5
79
+ },
80
+ "max_position_embeddings": 16384,
81
+ "model_type": "phi3",
82
+ "num_attention_heads": 40,
83
+ "num_hidden_layers": 40,
84
+ "num_key_value_heads": 10,
85
+ "original_max_position_embeddings": 16384,
86
+ "pad_token_id": 100349,
87
+ "partial_rotary_factor": 1.0,
88
+ "resid_pdrop": 0.0,
89
+ "rms_norm_eps": 1e-05,
90
+ "rope_scaling": null,
91
+ "rope_theta": 250000,
92
+ "sliding_window": null,
93
+ "tie_word_embeddings": false,
94
+ "torch_dtype": "bfloat16",
95
+ "transformers_version": "4.52.3",
96
+ "use_cache": true,
97
+ "vocab_size": 100352
98
+ }
99
+
100
+ [2025-05-26 15:57:19,442][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/model.safetensors.index.json
101
+ [2025-05-26 15:57:19,443][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object
102
+ [2025-05-26 15:57:19,443][transformers.modeling_utils][INFO] - Instantiating Phi3ForSequenceClassification model under default dtype torch.bfloat16.
103
+ [2025-05-26 15:57:28,301][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at microsoft/phi-4 were not used when initializing Phi3ForSequenceClassification: ['lm_head.weight']
104
+ - This IS expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
105
+ - This IS NOT expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
106
+ [2025-05-26 15:57:28,302][transformers.modeling_utils][WARNING] - Some weights of Phi3ForSequenceClassification were not initialized from the model checkpoint at microsoft/phi-4 and are newly initialized: ['score.weight']
107
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
108
+ [2025-05-26 15:57:36,548][__main__][INFO] - Loaded pre-trained PEFT model from kamel-usp/jbcs2025_phi4-balanced-C4
109
+ [2025-05-26 15:57:36,553][__main__][INFO] - None
110
+ [2025-05-26 15:57:36,558][transformers.training_args][INFO] - PyTorch: setting up devices
111
+ [2025-05-26 15:57:36,657][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
112
+ [2025-05-26 15:57:36,678][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
113
+ [2025-05-26 15:57:36,719][transformers.trainer][INFO] - Using auto half precision backend
114
+ [2025-05-26 15:57:36,720][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
115
+ [2025-05-26 15:57:36,720][__main__][INFO] - Running inference on test dataset
116
+ [2025-05-26 15:57:36,722][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id, grades, prompt, essay_year, supporting_text, essay_text, id_prompt, reference. If id, grades, prompt, essay_year, supporting_text, essay_text, id_prompt, reference are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
117
+ [2025-05-26 15:57:36,754][transformers.trainer][INFO] -
118
+ ***** Running Prediction *****
119
+ [2025-05-26 15:57:36,754][transformers.trainer][INFO] - Num examples = 138
120
+ [2025-05-26 15:57:36,754][transformers.trainer][INFO] - Batch size = 4
121
+ [2025-05-26 15:59:37,941][transformers][INFO] - {'accuracy': 0.7028985507246377, 'RMSE': 24.55399256179405, 'QWK': 0.579465541490858, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.27468966776195697, 'Micro_F1': 0.7028985507246377, 'Weighted_F1': 0.6761155293109196, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(135), 'FP_1': np.int64(2), 'FN_1': np.int64(1), 'TP_2': np.int64(1), 'TN_2': np.int64(128), 'FP_2': np.int64(1), 'FN_2': np.int64(8), 'TP_3': np.int64(67), 'TN_3': np.int64(39), 'FP_3': np.int64(23), 'FN_3': np.int64(9), 'TP_4': np.int64(29), 'TN_4': np.int64(79), 'FP_4': np.int64(13), 'FN_4': np.int64(17), 'TP_5': np.int64(0), 'TN_5': np.int64(131), 'FP_5': np.int64(2), 'FN_5': np.int64(5)}
122
+ [2025-05-26 15:59:37,967][__main__][INFO] - Inference results saved to phi-4-phi4_classification_lora-C4_inference_results.jsonl
123
+ [2025-05-26 15:59:37,967][__main__][INFO] - Inference results: {'accuracy': 0.7028985507246377, 'RMSE': 24.55399256179405, 'QWK': 0.579465541490858, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.27468966776195697, 'Micro_F1': 0.7028985507246377, 'Weighted_F1': 0.6761155293109196, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(135), 'FP_1': np.int64(2), 'FN_1': np.int64(1), 'TP_2': np.int64(1), 'TN_2': np.int64(128), 'FP_2': np.int64(1), 'FN_2': np.int64(8), 'TP_3': np.int64(67), 'TN_3': np.int64(39), 'FP_3': np.int64(23), 'FN_3': np.int64(9), 'TP_4': np.int64(29), 'TN_4': np.int64(79), 'FP_4': np.int64(13), 'FN_4': np.int64(17), 'TP_5': np.int64(0), 'TN_5': np.int64(131), 'FP_5': np.int64(2), 'FN_5': np.int64(5)}
124
+ [2025-05-26 15:59:37,968][__main__][INFO] - Inference experiment completed
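The log above loads microsoft/phi-4 as a Phi3ForSequenceClassification with six labels and then attaches the LoRA adapter kamel-usp/jbcs2025_phi4-balanced-C4. A hedged sketch of that loading sequence follows; the repository's actual helper code may differ.

import torch
from peft import PeftModel
from transformers import AutoModelForSequenceClassification, AutoTokenizer

CACHE_DIR = "/tmp/"  # cache_dir from the run config

tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-4", cache_dir=CACHE_DIR)

# Base model: phi-4 with a fresh 6-label classification head (hence the
# "score.weight ... newly initialized" warning in the log).
base_model = AutoModelForSequenceClassification.from_pretrained(
    "microsoft/phi-4",
    num_labels=6,
    torch_dtype=torch.bfloat16,
    cache_dir=CACHE_DIR,
)

# LoRA adapter trained for competency C4.
model = PeftModel.from_pretrained(base_model, "kamel-usp/jbcs2025_phi4-balanced-C4")
model.eval()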
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C5/.hydra/config.yaml ADDED
@@ -0,0 +1,37 @@
1
+ cache_dir: /tmp/
2
+ dataset:
3
+ name: kamel-usp/aes_enem_dataset
4
+ split: JBCS2025
5
+ training_params:
6
+ seed: 42
7
+ num_train_epochs: 20
8
+ logging_steps: 100
9
+ metric_for_best_model: QWK
10
+ bf16: true
11
+ post_training_results:
12
+ model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
13
+ experiments:
14
+ model:
15
+ name: microsoft/phi-4
16
+ type: phi4_classification_lora
17
+ num_labels: 6
18
+ output_dir: ./results/phi4-balanced/C5
19
+ logging_dir: ./logs/phi4-balanced/C5
20
+ best_model_dir: ./results/phi4-balanced/C5/best_model
21
+ lora_r: 8
22
+ lora_dropout: 0.05
23
+ lora_alpha: 16
24
+ lora_target_modules: all-linear
25
+ checkpoint_path: kamel-usp/jbcs2025_phi4-balanced-C5
26
+ tokenizer:
27
+ name: microsoft/phi-4
28
+ dataset:
29
+ grade_index: 4
30
+ training_params:
31
+ weight_decay: 0.01
32
+ warmup_ratio: 0.1
33
+ learning_rate: 5.0e-05
34
+ train_batch_size: 2
35
+ eval_batch_size: 4
36
+ gradient_accumulation_steps: 8
37
+ gradient_checkpointing: false
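The lora_r / lora_alpha / lora_dropout / lora_target_modules values above map directly onto a peft LoraConfig; whether the training code constructs it exactly this way is an assumption.

from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,   # classification head on top of phi-4
    r=8,                          # lora_r
    lora_alpha=16,                # lora_alpha
    lora_dropout=0.05,            # lora_dropout
    target_modules="all-linear",  # lora_target_modules
)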
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C5/.hydra/hydra.yaml ADDED
@@ -0,0 +1,155 @@
1
+ hydra:
2
+ run:
3
+ dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
4
+ sweep:
5
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
6
+ subdir: ${hydra.job.num}
7
+ launcher:
8
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
9
+ sweeper:
10
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
11
+ max_batch_size: null
12
+ params: null
13
+ help:
14
+ app_name: ${hydra.job.name}
15
+ header: '${hydra.help.app_name} is powered by Hydra.
16
+
17
+ '
18
+ footer: 'Powered by Hydra (https://hydra.cc)
19
+
20
+ Use --hydra-help to view Hydra specific help
21
+
22
+ '
23
+ template: '${hydra.help.header}
24
+
25
+ == Configuration groups ==
26
+
27
+ Compose your configuration from those groups (group=option)
28
+
29
+
30
+ $APP_CONFIG_GROUPS
31
+
32
+
33
+ == Config ==
34
+
35
+ Override anything in the config (foo.bar=value)
36
+
37
+
38
+ $CONFIG
39
+
40
+
41
+ ${hydra.help.footer}
42
+
43
+ '
44
+ hydra_help:
45
+ template: 'Hydra (${hydra.runtime.version})
46
+
47
+ See https://hydra.cc for more info.
48
+
49
+
50
+ == Flags ==
51
+
52
+ $FLAGS_HELP
53
+
54
+
55
+ == Configuration groups ==
56
+
57
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
58
+ to command line)
59
+
60
+
61
+ $HYDRA_CONFIG_GROUPS
62
+
63
+
64
+ Use ''--cfg hydra'' to Show the Hydra config.
65
+
66
+ '
67
+ hydra_help: ???
68
+ hydra_logging:
69
+ version: 1
70
+ formatters:
71
+ simple:
72
+ format: '[%(asctime)s][HYDRA] %(message)s'
73
+ handlers:
74
+ console:
75
+ class: logging.StreamHandler
76
+ formatter: simple
77
+ stream: ext://sys.stdout
78
+ root:
79
+ level: INFO
80
+ handlers:
81
+ - console
82
+ loggers:
83
+ logging_example:
84
+ level: DEBUG
85
+ disable_existing_loggers: false
86
+ job_logging:
87
+ version: 1
88
+ formatters:
89
+ simple:
90
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
91
+ handlers:
92
+ console:
93
+ class: logging.StreamHandler
94
+ formatter: simple
95
+ stream: ext://sys.stdout
96
+ file:
97
+ class: logging.FileHandler
98
+ formatter: simple
99
+ filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
100
+ root:
101
+ level: INFO
102
+ handlers:
103
+ - console
104
+ - file
105
+ disable_existing_loggers: false
106
+ env: {}
107
+ mode: RUN
108
+ searchpath: []
109
+ callbacks: {}
110
+ output_subdir: .hydra
111
+ overrides:
112
+ hydra:
113
+ - hydra.mode=RUN
114
+ task: []
115
+ job:
116
+ name: run_inference_experiment
117
+ chdir: null
118
+ override_dirname: ''
119
+ id: ???
120
+ num: ???
121
+ config_name: config
122
+ env_set: {}
123
+ env_copy: []
124
+ config:
125
+ override_dirname:
126
+ kv_sep: '='
127
+ item_sep: ','
128
+ exclude_keys: []
129
+ runtime:
130
+ version: 1.3.2
131
+ version_base: '1.1'
132
+ cwd: /workspace/jbcs2025
133
+ config_sources:
134
+ - path: hydra.conf
135
+ schema: pkg
136
+ provider: hydra
137
+ - path: /workspace/jbcs2025/configs
138
+ schema: file
139
+ provider: main
140
+ - path: ''
141
+ schema: structured
142
+ provider: schema
143
+ output_dir: /workspace/jbcs2025/outputs/2025-05-26/16-00-16
144
+ choices:
145
+ experiments: slm_decoder_models/C5
146
+ hydra/env: default
147
+ hydra/callbacks: null
148
+ hydra/job_logging: default
149
+ hydra/hydra_logging: default
150
+ hydra/hydra_help: default
151
+ hydra/help: default
152
+ hydra/sweeper: basic
153
+ hydra/launcher: basic
154
+ hydra/output: default
155
+ verbose: false
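Each run directory also keeps a .hydra/config.yaml snapshot alongside this hydra.yaml, so a finished run can be inspected or replayed without re-composing the config. A short sketch, using this run's directory as listed in the commit:

from omegaconf import OmegaConf

cfg = OmegaConf.load(
    "runs/slm_decoder_models/phi-4/"
    "phi-4-phi4_classification_lora-C5/.hydra/config.yaml"
)
print(cfg.experiments.model.checkpoint_path)  # kamel-usp/jbcs2025_phi4-balanced-C5
print(cfg.experiments.dataset.grade_index)    # 4 -> ENEM competency C5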
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C5/.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
1
+ []
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C5/evaluation_results.csv ADDED
@@ -0,0 +1,2 @@
1
+ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
2
+ 0.3115942028985507,60.91095901015048,0.45744053469628465,0.1376811594202898,0.18965878221692178,0.3115942028985507,0.24393006835878425,3,115,1,19,17,55,51,15,1,103,11,23,0,113,0,25,22,74,32,10,0,135,0,3,2025-05-26 16:00:16,phi-4-phi4_classification_lora-C5
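QWK, the model-selection metric named in the config (metric_for_best_model: QWK), is quadratically weighted Cohen's kappa. A minimal sketch with scikit-learn, using placeholder labels; the repository's own metric implementation is not shown in this diff and may differ.

from sklearn.metrics import cohen_kappa_score

y_true = [3, 4, 3, 2, 5]  # gold grade indices (placeholder values)
y_pred = [3, 4, 4, 2, 4]  # predicted grade indices (placeholder values)

qwk = cohen_kappa_score(y_true, y_pred, weights="quadratic")
print(f"QWK = {qwk:.3f}")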
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C5/phi-4-phi4_classification_lora-C5_inference_results.jsonl ADDED
The diff for this file is too large to render. See raw diff
runs/slm_decoder_models/phi-4/phi-4-phi4_classification_lora-C5/run_inference_experiment.log ADDED
@@ -0,0 +1,124 @@
1
+ [2025-05-26 16:00:16,898][__main__][INFO] - Starting inference experiment
2
+ [2025-05-26 16:00:16,900][__main__][INFO] - cache_dir: /tmp/
3
+ dataset:
4
+ name: kamel-usp/aes_enem_dataset
5
+ split: JBCS2025
6
+ training_params:
7
+ seed: 42
8
+ num_train_epochs: 20
9
+ logging_steps: 100
10
+ metric_for_best_model: QWK
11
+ bf16: true
12
+ post_training_results:
13
+ model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
14
+ experiments:
15
+ model:
16
+ name: microsoft/phi-4
17
+ type: phi4_classification_lora
18
+ num_labels: 6
19
+ output_dir: ./results/phi4-balanced/C5
20
+ logging_dir: ./logs/phi4-balanced/C5
21
+ best_model_dir: ./results/phi4-balanced/C5/best_model
22
+ lora_r: 8
23
+ lora_dropout: 0.05
24
+ lora_alpha: 16
25
+ lora_target_modules: all-linear
26
+ checkpoint_path: kamel-usp/jbcs2025_phi4-balanced-C5
27
+ tokenizer:
28
+ name: microsoft/phi-4
29
+ dataset:
30
+ grade_index: 4
31
+ training_params:
32
+ weight_decay: 0.01
33
+ warmup_ratio: 0.1
34
+ learning_rate: 5.0e-05
35
+ train_batch_size: 2
36
+ eval_batch_size: 4
37
+ gradient_accumulation_steps: 8
38
+ gradient_checkpointing: false
39
+
40
+ [2025-05-26 16:00:16,903][__main__][INFO] - Running inference with fine-tuned HF model
41
+ [2025-05-26 16:00:22,843][transformers.tokenization_utils_base][INFO] - loading file vocab.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/vocab.json
42
+ [2025-05-26 16:00:22,843][transformers.tokenization_utils_base][INFO] - loading file merges.txt from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/merges.txt
43
+ [2025-05-26 16:00:22,843][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/tokenizer.json
44
+ [2025-05-26 16:00:22,843][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/added_tokens.json
45
+ [2025-05-26 16:00:22,844][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/special_tokens_map.json
46
+ [2025-05-26 16:00:22,844][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/tokenizer_config.json
47
+ [2025-05-26 16:00:22,844][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
48
+ [2025-05-26 16:00:23,193][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False
49
+ [2025-05-26 16:00:25,150][__main__][INFO] - Loading model from: microsoft/phi-4
50
+ [2025-05-26 16:00:25,402][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/config.json
51
+ [2025-05-26 16:00:25,405][transformers.configuration_utils][INFO] - Model config Phi3Config {
52
+ "architectures": [
53
+ "Phi3ForCausalLM"
54
+ ],
55
+ "attention_bias": false,
56
+ "attention_dropout": 0.0,
57
+ "bos_token_id": 100257,
58
+ "embd_pdrop": 0.0,
59
+ "eos_token_id": 100265,
60
+ "hidden_act": "silu",
61
+ "hidden_size": 5120,
62
+ "id2label": {
63
+ "0": 0,
64
+ "1": 40,
65
+ "2": 80,
66
+ "3": 120,
67
+ "4": 160,
68
+ "5": 200
69
+ },
70
+ "initializer_range": 0.02,
71
+ "intermediate_size": 17920,
72
+ "label2id": {
73
+ "0": 0,
74
+ "40": 1,
75
+ "80": 2,
76
+ "120": 3,
77
+ "160": 4,
78
+ "200": 5
79
+ },
80
+ "max_position_embeddings": 16384,
81
+ "model_type": "phi3",
82
+ "num_attention_heads": 40,
83
+ "num_hidden_layers": 40,
84
+ "num_key_value_heads": 10,
85
+ "original_max_position_embeddings": 16384,
86
+ "pad_token_id": 100349,
87
+ "partial_rotary_factor": 1.0,
88
+ "resid_pdrop": 0.0,
89
+ "rms_norm_eps": 1e-05,
90
+ "rope_scaling": null,
91
+ "rope_theta": 250000,
92
+ "sliding_window": null,
93
+ "tie_word_embeddings": false,
94
+ "torch_dtype": "bfloat16",
95
+ "transformers_version": "4.52.3",
96
+ "use_cache": true,
97
+ "vocab_size": 100352
98
+ }
99
+
100
+ [2025-05-26 16:00:25,642][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/model.safetensors.index.json
101
+ [2025-05-26 16:00:25,642][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object
102
+ [2025-05-26 16:00:25,642][transformers.modeling_utils][INFO] - Instantiating Phi3ForSequenceClassification model under default dtype torch.bfloat16.
103
+ [2025-05-26 16:00:34,576][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at microsoft/phi-4 were not used when initializing Phi3ForSequenceClassification: ['lm_head.weight']
104
+ - This IS expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
105
+ - This IS NOT expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
106
+ [2025-05-26 16:00:34,576][transformers.modeling_utils][WARNING] - Some weights of Phi3ForSequenceClassification were not initialized from the model checkpoint at microsoft/phi-4 and are newly initialized: ['score.weight']
107
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
108
+ [2025-05-26 16:00:42,106][__main__][INFO] - Loaded pre-trained PEFT model from kamel-usp/jbcs2025_phi4-balanced-C5
109
+ [2025-05-26 16:00:42,112][__main__][INFO] - None
110
+ [2025-05-26 16:00:42,117][transformers.training_args][INFO] - PyTorch: setting up devices
111
+ [2025-05-26 16:00:42,170][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
112
+ [2025-05-26 16:00:42,190][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
113
+ [2025-05-26 16:00:42,228][transformers.trainer][INFO] - Using auto half precision backend
114
+ [2025-05-26 16:00:42,229][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
115
+ [2025-05-26 16:00:42,230][__main__][INFO] - Running inference on test dataset
116
+ [2025-05-26 16:00:42,231][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id, id_prompt, essay_text, essay_year, grades, supporting_text, reference, prompt. If id, id_prompt, essay_text, essay_year, grades, supporting_text, reference, prompt are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
117
+ [2025-05-26 16:00:42,262][transformers.trainer][INFO] -
118
+ ***** Running Prediction *****
119
+ [2025-05-26 16:00:42,262][transformers.trainer][INFO] - Num examples = 138
120
+ [2025-05-26 16:00:42,262][transformers.trainer][INFO] - Batch size = 4
121
+ [2025-05-26 16:02:50,833][transformers][INFO] - {'accuracy': 0.3115942028985507, 'RMSE': 60.91095901015048, 'QWK': 0.45744053469628465, 'HDIV': 0.1376811594202898, 'Macro_F1': 0.18965878221692178, 'Micro_F1': 0.3115942028985507, 'Weighted_F1': 0.24393006835878425, 'TP_0': np.int64(3), 'TN_0': np.int64(115), 'FP_0': np.int64(1), 'FN_0': np.int64(19), 'TP_1': np.int64(17), 'TN_1': np.int64(55), 'FP_1': np.int64(51), 'FN_1': np.int64(15), 'TP_2': np.int64(1), 'TN_2': np.int64(103), 'FP_2': np.int64(11), 'FN_2': np.int64(23), 'TP_3': np.int64(0), 'TN_3': np.int64(113), 'FP_3': np.int64(0), 'FN_3': np.int64(25), 'TP_4': np.int64(22), 'TN_4': np.int64(74), 'FP_4': np.int64(32), 'FN_4': np.int64(10), 'TP_5': np.int64(0), 'TN_5': np.int64(135), 'FP_5': np.int64(0), 'FN_5': np.int64(3)}
122
+ [2025-05-26 16:02:50,854][__main__][INFO] - Inference results saved to phi-4-phi4_classification_lora-C5_inference_results.jsonl
123
+ [2025-05-26 16:02:50,855][__main__][INFO] - Inference results: {'accuracy': 0.3115942028985507, 'RMSE': 60.91095901015048, 'QWK': 0.45744053469628465, 'HDIV': 0.1376811594202898, 'Macro_F1': 0.18965878221692178, 'Micro_F1': 0.3115942028985507, 'Weighted_F1': 0.24393006835878425, 'TP_0': np.int64(3), 'TN_0': np.int64(115), 'FP_0': np.int64(1), 'FN_0': np.int64(19), 'TP_1': np.int64(17), 'TN_1': np.int64(55), 'FP_1': np.int64(51), 'FN_1': np.int64(15), 'TP_2': np.int64(1), 'TN_2': np.int64(103), 'FP_2': np.int64(11), 'FN_2': np.int64(23), 'TP_3': np.int64(0), 'TN_3': np.int64(113), 'FP_3': np.int64(0), 'FN_3': np.int64(25), 'TP_4': np.int64(22), 'TN_4': np.int64(74), 'FP_4': np.int64(32), 'FN_4': np.int64(10), 'TP_5': np.int64(0), 'TN_5': np.int64(135), 'FP_5': np.int64(0), 'FN_5': np.int64(3)}
124
+ [2025-05-26 16:02:50,855][__main__][INFO] - Inference experiment completed
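Both logs end with a Trainer prediction pass over the 138-example test split (eval batch size 4), with predicted class indices mapped back to ENEM grades via id2label (0, 40, ..., 200). A sketch of that step, assuming `model` and `test_dataset` have already been prepared as in the loading sketch earlier:

import numpy as np
from transformers import Trainer, TrainingArguments

# `model` is the PEFT-wrapped classifier and `test_dataset` the tokenized test
# split; both are assumed to exist already (see the loading sketch above).
trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="./results", per_device_eval_batch_size=4),
)

output = trainer.predict(test_dataset)
pred_ids = np.argmax(output.predictions, axis=-1)
pred_grades = [model.config.id2label[int(i)] for i in pred_ids]  # e.g. 3 -> 120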