jaio98 committed (verified)
Commit a88e206 · 1 Parent(s): d0ca261

Upload 10 files
generative/config.json ADDED
@@ -0,0 +1,238 @@
+ {
+   "datasets": {
+     "xnli-eu-native": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/eu/xnli-eu-native.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-eu-var": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/eu/xnli-eu-var.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-es-native": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/es/xnli-eu2es-native.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-es-var": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/es/xnli-es-var.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-en": {
+       "data_path": "/tartalo01/users/jbengoetxea004/phd/xnli-paraphrasing/xnli-var-decoders/scripts/parquet-con/xnli-en-test.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-es": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/es/xnli-es-original.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-eu": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/eu/xnli-eu-original.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-eu-var-no-rep": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/eu/xnli-native-var-eu-NO-REPETITION.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-eu-var-less-gip": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/eu/xnli-native-var-eu-less-gip.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-eu-var-less-biz": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/eu/xnli-native-var-eu-less-biz.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-es-var-no-rep": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/es/xnli-native-var-es-no-rep.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-eu-biz": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli-eu-test-bizkaiera-done.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-eu-gip": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli-eu-test-gipuzkera-done.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-eu-naf": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli-eu-test-nafar-lapurtera-done.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-eu-nat-biz": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli-eu-native-bizkaieraz-done.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-eu-nat-gip": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli-eu-native-gipuzkera-done.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     },
+     "xnli-eu-nat-naf": {
+       "data_path": "/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli-eu-native-nafar-lapurtera-done.tsv",
+       "data_path_paraphrase": "",
+       "prem_col": "premise",
+       "hyp_col": "hypothesis",
+       "label_col": "label",
+       "prompts": ["trilabel", "qa-zero", "qa-few"]
+     }
+   },
+   "models": {
+     "llama3instruct8": "meta-llama/Meta-Llama-3-8B-Instruct",
+     "llama3instruct70": "meta-llama/Meta-Llama-3-70B-Instruct",
+     "gemmainstruct9": "google/gemma-2-9b-it",
+     "gemmainstruct27": "google/gemma-2-27b-it",
+     "latxainstruct70": "HiTZ/Latxa-Llama-3.1-70B-Instruct",
+     "llama3base70": "meta-llama/Meta-Llama-3.1-70B"
+   },
+   "prompts": {
+     "trilabel": {
+       "nli-zero": {
+         "preffix": "Please, answer in one word, with one of the following labels: <entailment>, <contradiction> or <neutral> Use exactly one of these three labels.",
+         "label_mapping": {
+           "entailment": "entailment",
+           "contradiction": "contradiction",
+           "neutral": "neutral"
+         }
+       },
+       "nli-few": {
+         "preffix": "Say which is the inference relationship between these two sentences. Please, answer in one word, with one of the following labels: <entailment>, <contradiction> or <neutral> Use exactly one of these three labels. Here you have some examples: Postal Service were to reduce delivery frequency -> The postal service could deliver less frequently: <entailment>. This elegant spa town on the edge of the Lac du Bourget has offered cures for rheumatism and other ailments for centuries -> The town was only established in the past fifty years: <contradiction>. And while we allow people to give a kidney to their child , we do not allow them to donate their heart -> You can't always donate organs to your child: <neutral>. ",
+         "label_mapping": {
+           "entailment": "entailment",
+           "contradiction": "contradiction",
+           "neutral": "neutral"
+         }
+       },
+       "qa-zero": {
+         "preffix": "Are these two sentences entailed, contradicted or undetermined to each other? Please, answer in one word, with one of the following labels: <entailment>, <contradiction> or <neutral> Use exactly one of these three labels.",
+         "label_mapping": {
+           "entailment": "entailment",
+           "contradiction": "contradiction",
+           "neutral": "neutral"
+         }
+       },
+       "qa-few": {
+         "preffix": "Are these two sentences entailed, contradicted or undetermined to each other? Please, answer in one word, with one of the following labels: <entailment>, <contradiction> or <neutral> Use exactly one of these three labels. Here you have some examples: Postal Service were to reduce delivery frequency -> The postal service could deliver less frequently: <entailment>. This elegant spa town on the edge of the Lac du Bourget has offered cures for rheumatism and other ailments for centuries -> The town was only established in the past fifty years: <contradiction>. And while we allow people to give a kidney to their child , we do not allow them to donate their heart -> You can't always donate organs to your child: <neutral>.",
+         "label_mapping": {
+           "entailment": "entailment",
+           "contradiction": "contradiction",
+           "neutral": "neutral"
+         }
+       },
+       "chain": {
+         "preffix": "You are an expert linguist and your task is to annotate sentences for the task of Natural Language Inference. This task consists in determining if a first sentence (premise) entails, contradicts or does not entail nor contradict the second sentence (hypothesis). Please, answer in one word, with one of the following labels: <entailment>, <contradiction> or <neutral> \n Use exactly one of these three labels \n Here you have a few examples:\n Premise: Postal Service were to reduce delivery frequency. \n Hypothesis: The postal service could deliver less frequently. \n Answer: <entailment> \n Premise: This elegant spa town on the edge of the Lac du Bourget has offered cures for rheumatism and other ailments for centuries. \n Hypothesis: The town was only established in the past fifty years. \n Answer: <contradiction> \n Premise: And while we allow people to give a kidney to their child , we do not allow them to donate their heart. \n Hypothesis: You can't always donate organs to your child. \n Answer: <neutral>",
+         "label_mapping": {
+           "entailment": "entailment",
+           "contradiction": "contradiction",
+           "neutral": "neutral"
+         }
+       }
+     },
+     "qa-zero": {
+       "entailment": {
+         "preffix": "Are these two sentences entailed? Please, answer between <yes> or <no>.",
+         "label_mapping": {
+           "yes": "entailment",
+           "no": "not_entailment"
+         }
+       },
+       "contradiction": {
+         "preffix": "Are these two sentences contradictions? Please, answer between <yes> or <no>.",
+         "label_mapping": {
+           "yes": "contradiction",
+           "no": "not_contradiction"
+         }
+       },
+       "neutral": {
+         "preffix": "Are these two sentences unrelated? Please, answer between <yes> or <no>.",
+         "label_mapping": {
+           "yes": "neutral",
+           "no": "not_neutral"
+         }
+       }
+     },
+     "qa-few": {
+       "entailment": {
+         "preffix": "Are these two sentences entailed? Please, answer between <yes> or <no>. Here you have some examples: Postal Service were to reduce delivery frequency -> The postal service could deliver less frequently: <yes>. This elegant spa town on the edge of the Lac du Bourget has offered cures for rheumatism and other ailments for centuries -> The town was only established in the past fifty years: <no>. And while we allow people to give a kidney to their child , we do not allow them to donate their heart -> You can't always donate organs to your child: <no>.",
+         "label_mapping": {
+           "yes": "entailment",
+           "no": "not_entailment"
+         }
+       },
+       "contradiction": {
+         "preffix": "Are these two sentences contradictions? Please, answer between <yes> or <no>. Here you have some examples: Postal Service were to reduce delivery frequency -> The postal service could deliver less frequently: <no>. This elegant spa town on the edge of the Lac du Bourget has offered cures for rheumatism and other ailments for centuries -> The town was only established in the past fifty years: <yes>. And while we allow people to give a kidney to their child , we do not allow them to donate their heart -> You can't always donate organs to your child: <no>.",
+         "label_mapping": {
+           "yes": "contradiction",
+           "no": "not_contradiction"
+         }
+       },
+       "neutral": {
+         "preffix": "Are these two sentences unrelated? Please, answer between <yes> or <no>. Here you have some examples: Postal Service were to reduce delivery frequency -> The postal service could deliver less frequently: <no>. This elegant spa town on the edge of the Lac du Bourget has offered cures for rheumatism and other ailments for centuries -> The town was only established in the past fifty years: <no>. And while we allow people to give a kidney to their child , we do not allow them to donate their heart -> You can't always donate organs to your child: <yes>.",
+         "label_mapping": {
+           "yes": "neutral",
+           "no": "not_neutral"
+         }
+       }
+     }
+   }
+ }
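
For orientation, a minimal sketch of how an entry in this config is consumed (mirroring the lookups in zero_shot.py below; it assumes config.json is readable from the working directory, and keeps the file's own "preffix" spelling, since that is the actual key):

import json

with open("config.json") as f:
    config = json.load(f)

# One dataset entry: where the TSV lives and which columns hold the NLI fields.
ds = config["datasets"]["xnli-eu-var"]
print(ds["data_path"], ds["prem_col"], ds["hyp_col"], ds["label_col"])
print(ds["prompts"])  # task formulations allowed for this dataset

# One prompt entry: the instruction text and the mapping from model output to label.
prompt = config["prompts"]["trilabel"]["qa-zero"]
prefix = prompt["preffix"]
mapping = prompt["label_mapping"]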
generative/requirements.txt ADDED
@@ -0,0 +1,49 @@
+ accelerate==1.6.0
+ certifi==2025.4.26
+ charset-normalizer==3.4.2
+ filelock==3.18.0
+ fsspec==2025.3.2
+ huggingface-hub==0.31.2
+ idna==3.10
+ Jinja2==3.1.3
+ joblib==1.5.0
+ MarkupSafe==2.1.5
+ mpmath==1.3.0
+ networkx==3.3
+ numpy==2.2.5
+ nvidia-cublas-cu11==11.11.3.6
+ nvidia-cuda-cupti-cu11==11.8.87
+ nvidia-cuda-nvrtc-cu11==11.8.89
+ nvidia-cuda-runtime-cu11==11.8.89
+ nvidia-cudnn-cu11==9.1.0.70
+ nvidia-cufft-cu11==10.9.0.58
+ nvidia-curand-cu11==10.3.0.86
+ nvidia-cusolver-cu11==11.4.1.48
+ nvidia-cusparse-cu11==11.7.5.86
+ nvidia-nccl-cu11==2.21.5
+ nvidia-nvtx-cu11==11.8.86
+ packaging==25.0
+ pandas==2.2.3
+ pillow==11.0.0
+ psutil==7.0.0
+ python-dateutil==2.9.0.post0
+ pytz==2025.2
+ PyYAML==6.0.2
+ regex==2024.11.6
+ requests==2.32.3
+ safetensors==0.5.3
+ scikit-learn==1.6.1
+ scipy==1.15.3
+ six==1.17.0
+ sympy==1.13.3
+ threadpoolctl==3.6.0
+ tokenizers==0.21.1
+ torch==2.7.0+cu118
+ torchaudio==2.7.0+cu118
+ torchvision==0.22.0+cu118
+ tqdm==4.67.1
+ transformers @ git+https://github.com/huggingface/transformers@b311a3f50697c9602cc5d13a5faf7f6059c392ca
+ triton==3.3.0
+ typing_extensions==4.13.2
+ tzdata==2025.2
+ urllib3==2.4.0
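
Install note: the `+cu118` torch wheels are not published on PyPI, so installing these pins will likely require PyTorch's CUDA 11.8 wheel index, e.g. `pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118`. The `transformers` entry pins a specific git commit, so `git` must be available at install time.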
generative/scripts/array-gemma.sh ADDED
@@ -0,0 +1,50 @@
+ #!/usr/bin/env bash
+ #SBATCH --qos=regular
+ #SBATCH --job-name=xnli_gemmainstruct27
+ #SBATCH --cpus-per-task=4
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --mem=64GB
+ #SBATCH --gres=gpu:4
+ #SBATCH --constraint=a100-sxm4
+ #SBATCH --output=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/logs/xnli_gemmainstruct27_%a.log
+ #SBATCH --error=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/logs/xnli_gemmainstruct27_%a.err
+ #SBATCH --time=01:00:00 # dd-hh:mm:ss
+ #SBATCH --mail-type=REQUEUE
+ #SBATCH --mail-user=jaione.bengoetxea@ehu.eus
+ #SBATCH --array=0-8%2
+
+ source /scratch/jbengoetxea/phd/.gemma_env/bin/activate
+
+ export TRANSFORMERS_CACHE="/scratch/jbengoetxea/.cache"
+
+ # Values for the 2 loops:
+ # DATASET_VALUES=(xnli-eu-var xnli-eu-native xnli-eu xnli-es-var xnli-es-native xnli-es)
+ # PROMPT_TYPE_VALUES=(chain nli-zero nli-few qa-zero qa-few)
+
+ DATASET_VALUES=(xnli-eu-biz xnli-eu-gip xnli-eu-naf)
+ PROMPT_TYPE_VALUES=(chain nli-zero nli-few)
+
+ N=${#PROMPT_TYPE_VALUES[@]} # Number of items in the second level (PROMPT_TYPE_VALUES)
+
+ # Decode SLURM_ARRAY_TASK_ID to get the two indices
+ IDX1=$((SLURM_ARRAY_TASK_ID / N))
+ IDX2=$((SLURM_ARRAY_TASK_ID % N))
+
+ # Use IDX1 and IDX2 for the two-level loop
+ DATASET="${DATASET_VALUES[${IDX1}]}"
+ PROMPT_TYPE="${PROMPT_TYPE_VALUES[${IDX2}]}"
+
+ TASK=trilabel
+ MODEL=gemmainstruct27
+ OUTPUT=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/results/$DATASET/$MODEL
+
+ python3 /scratch/jbengoetxea/phd/XNLIvar/scripts/generative/scripts/zero_shot.py \
+     --dataset "${DATASET}" \
+     --model $MODEL \
+     --output_dir $OUTPUT \
+     --task $TASK \
+     --prompt_type "${PROMPT_TYPE}"
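
The division/modulo decode above enumerates the Cartesian product of DATASET_VALUES and PROMPT_TYPE_VALUES, which is why `--array=0-8` covers all 3 × 3 = 9 combinations. A minimal Python sketch of the same arithmetic, with the values copied from this script, useful for checking which job gets which pair:

DATASETS = ["xnli-eu-biz", "xnli-eu-gip", "xnli-eu-naf"]
PROMPTS = ["chain", "nli-zero", "nli-few"]

N = len(PROMPTS)
for task_id in range(len(DATASETS) * N):   # --array=0-8
    idx1, idx2 = divmod(task_id, N)        # same as / and % in the script
    print(task_id, DATASETS[idx1], PROMPTS[idx2])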
generative/scripts/array-latxa.sh ADDED
@@ -0,0 +1,46 @@
+ #!/usr/bin/env bash
+ #SBATCH --qos=regular
+ #SBATCH --job-name=xnli_latxainstruct70
+ #SBATCH --cpus-per-task=2
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --mem=64GB
+ #SBATCH --gres=gpu:4
+ #SBATCH --constraint=a100-sxm4
+ #SBATCH --output=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/logs/xnli-eu-var_latxainstruct70_%a.log
+ #SBATCH --error=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/logs/xnli-eu-var_latxainstruct70_%a.err
+ #SBATCH --time=01:00:00 # dd-hh:mm:ss
+ #SBATCH --mail-type=REQUEUE
+ #SBATCH --mail-user=jaione.bengoetxea@ehu.eus
+ #SBATCH --array=0-5%2
+
+ source /scratch/jbengoetxea/phd/.phd_venv_new/bin/activate
+
+ export TRANSFORMERS_CACHE="/scratch/jbengoetxea/.cache"
+
+ # Values for the 2 loops:
+ DATASET_VALUES=(xnli-eu-nat-biz xnli-eu-nat-gip xnli-eu-nat-naf)
+ PROMPT_TYPE_VALUES=(nli-few nli-zero)
+
+ N=${#PROMPT_TYPE_VALUES[@]} # Number of items in the second level (PROMPT_TYPE_VALUES)
+
+ # Decode SLURM_ARRAY_TASK_ID to get the two indices
+ IDX1=$((SLURM_ARRAY_TASK_ID / N))
+ IDX2=$((SLURM_ARRAY_TASK_ID % N))
+
+ # Use IDX1 and IDX2 for the two-level loop
+ DATASET="${DATASET_VALUES[${IDX1}]}"
+ PROMPT_TYPE="${PROMPT_TYPE_VALUES[${IDX2}]}"
+
+ TASK=trilabel
+ MODEL=latxainstruct70
+ OUTPUT=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/results/$DATASET/$MODEL
+
+ python3 /scratch/jbengoetxea/phd/XNLIvar/scripts/generative/scripts/zero_shot.py \
+     --dataset "${DATASET}" \
+     --model $MODEL \
+     --output_dir $OUTPUT \
+     --task $TASK \
+     --prompt_type "${PROMPT_TYPE}"
generative/scripts/array-llama.sh ADDED
@@ -0,0 +1,46 @@
+ #!/usr/bin/env bash
+ #SBATCH --qos=regular
+ #SBATCH --job-name=xnli_llamainstruct70
+ #SBATCH --cpus-per-task=2
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --mem=64GB
+ #SBATCH --gres=gpu:4
+ #SBATCH --constraint=a100-sxm4
+ #SBATCH --output=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/logs/xnli-llamainstruct70_%a.log
+ #SBATCH --error=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/logs/xnli-llamainstruct70_%a.err
+ #SBATCH --time=01:00:00 # dd-hh:mm:ss
+ #SBATCH --mail-type=REQUEUE
+ #SBATCH --mail-user=jaione.bengoetxea@ehu.eus
+ #SBATCH --array=0-5%2
+
+ source /scratch/jbengoetxea/phd/.phd_venv_new/bin/activate
+
+ export TRANSFORMERS_CACHE="/scratch/jbengoetxea/.cache"
+
+ # Values for the 2 loops:
+ DATASET_VALUES=(xnli-eu-nat-biz xnli-eu-nat-gip xnli-eu-nat-naf)
+ PROMPT_TYPE_VALUES=(nli-few nli-zero)
+
+ N=${#PROMPT_TYPE_VALUES[@]} # Number of items in the second level (PROMPT_TYPE_VALUES)
+
+ # Decode SLURM_ARRAY_TASK_ID to get the two indices
+ IDX1=$((SLURM_ARRAY_TASK_ID / N))
+ IDX2=$((SLURM_ARRAY_TASK_ID % N))
+
+ # Use IDX1 and IDX2 for the two-level loop
+ DATASET="${DATASET_VALUES[${IDX1}]}"
+ PROMPT_TYPE="${PROMPT_TYPE_VALUES[${IDX2}]}"
+
+ TASK=trilabel
+ MODEL=llama3instruct70
+ OUTPUT=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/results/$DATASET/$MODEL
+
+ python3 /scratch/jbengoetxea/phd/XNLIvar/scripts/generative/scripts/zero_shot.py \
+     --dataset "${DATASET}" \
+     --model $MODEL \
+     --output_dir $OUTPUT \
+     --task $TASK \
+     --prompt_type "${PROMPT_TYPE}"
generative/scripts/get_accuracy.py ADDED
@@ -0,0 +1,66 @@
+ import pandas as pd
+ from sklearn.metrics import accuracy_score
+ import argparse
+ import os
+
+ def process_tsv_files(parent_dir, output_csv, is_qa=None):
+     results = []
+
+     # Walk through all subdirectories
+     for root, _, files in os.walk(parent_dir):
+         for file in files:
+             if file.endswith(".tsv"):  # Check for TSV files
+                 file_path = os.path.join(root, file)
+
+                 try:
+                     df = pd.read_csv(file_path, sep="\t")
+                     # Evaluate non-qa inference results
+                     if is_qa is None:
+                         if "gold_label" in df.columns and "prediction" in df.columns:
+                             y_gold = df["gold_label"].tolist()
+                             y_pred = df["prediction"].tolist()
+
+                             accuracy = accuracy_score(y_gold, y_pred)
+                             results.append([file_path, accuracy])
+                         else:
+                             print(f"Skipping {file_path}: Required columns not found.")
+
+                     # Evaluate qa inference results
+                     elif is_qa == "y":
+                         for neg_value in df["prediction"].unique():
+                             if neg_value.startswith("not_"):
+                                 evaluating_label = neg_value.replace("not_", "")
+
+                                 # Replace all other labels in gold_label with the negated version
+                                 df["gold_label"] = df["gold_label"].apply(
+                                     lambda x: x if x == evaluating_label else neg_value
+                                 )
+
+                         if "gold_label" in df.columns and "prediction" in df.columns:
+                             y_gold = df["gold_label"].tolist()
+                             y_pred = df["prediction"].tolist()
+
+                             accuracy = accuracy_score(y_gold, y_pred)
+                             results.append([file_path, accuracy])
+                         else:
+                             print(f"Skipping {file_path}: Required columns not found.")
+
+                 except Exception as e:
+                     print(f"Error processing {file_path}: {e}")
+
+     # Save results to a CSV
+     results_df = pd.DataFrame(results, columns=["File Path", "Accuracy"])
+     results_df.to_csv(output_csv, index=False)
+     print(f"Results saved to {output_csv}")
+
+ def main():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--parent_dir", type=str, required=True, help="Path to the parent folder")
+     parser.add_argument("--output_csv", type=str, required=True, help="Path to the output CSV file")
+     parser.add_argument("--is_qa", type=str, help='Set to "y" when evaluating binary qa inference results')
+     args = parser.parse_args()
+
+     process_tsv_files(args.parent_dir, args.output_csv, args.is_qa)
+
+ if __name__ == "__main__":
+     main()
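
The `is_qa == "y"` branch collapses the three-way gold labels into the binary scheme the qa prompts actually predict: whatever is not the label under evaluation becomes its `not_` counterpart before accuracy is computed. A small worked example on synthetic data, mirroring the column names above:

import pandas as pd

df = pd.DataFrame({
    "gold_label": ["entailment", "neutral", "contradiction"],
    "prediction": ["entailment", "not_entailment", "not_entailment"],
})

# This file predicts entailment vs. not_entailment, so collapse gold accordingly.
neg_value = "not_entailment"
evaluating_label = neg_value.replace("not_", "")
df["gold_label"] = df["gold_label"].apply(
    lambda x: x if x == evaluating_label else neg_value
)
print(df["gold_label"].tolist())  # ['entailment', 'not_entailment', 'not_entailment']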
generative/scripts/qa-array-gemma.sh ADDED
@@ -0,0 +1,50 @@
+ #!/usr/bin/env bash
+ #SBATCH --partition=hitz-exclusive
+ #SBATCH --account=hitz-exclusive
+ #SBATCH --job-name=xnli_gemmainstruct27
+ #SBATCH --cpus-per-task=2
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --mem=64GB
+ #SBATCH --gres=gpu:2
+ #SBATCH --constraint=a100-sxm4
+ #SBATCH --output=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/logs/xnli-gemmainstruct27_%a.log
+ #SBATCH --error=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/logs/xnli-gemmainstruct27_%a.err
+ #SBATCH --time=01:00:00 # dd-hh:mm:ss
+ #SBATCH --mail-type=REQUEUE
+ #SBATCH --mail-user=jaione.bengoetxea@ehu.eus
+ #SBATCH --array=0-35%2
+
+ export TRANSFORMERS_CACHE="/scratch/jbengoetxea/.cache"
+
+ # Values
+ DATASET_VALUES=(xnli-eu-var xnli-eu-native xnli-eu xnli-es-var xnli-es-native xnli-es)
+ PROMPT_TYPE_VALUES=(contradiction entailment neutral)
+ TASK_VALUES=(qa-zero qa-few)
+
+ # Decode SLURM_ARRAY_TASK_ID into the three indices
+ D=${#DATASET_VALUES[@]}
+ P=${#PROMPT_TYPE_VALUES[@]}
+ T=${#TASK_VALUES[@]}
+
+ TASK_ID=$SLURM_ARRAY_TASK_ID
+
+ IDX_D=$((TASK_ID / (P * T)))
+ IDX_P=$(((TASK_ID / T) % P))
+ IDX_T=$((TASK_ID % T))
+
+ DATASET="${DATASET_VALUES[$IDX_D]}"
+ PROMPT_TYPE="${PROMPT_TYPE_VALUES[$IDX_P]}"
+ TASK="${TASK_VALUES[$IDX_T]}"
+
+ # Final values and run script
+ MODEL=gemmainstruct27
+ OUTPUT=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/results/$DATASET/$MODEL/$TASK
+
+ TORCHDYNAMO_DISABLE=1 python3 /scratch/jbengoetxea/phd/XNLIvar/scripts/generative/scripts/zero_shot.py \
+     --dataset "${DATASET}" \
+     --model $MODEL \
+     --output_dir $OUTPUT \
+     --task $TASK \
+     --prompt_type "${PROMPT_TYPE}"
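
With three value arrays the same trick nests once more: the task id is read as a mixed-radix number whose digits pick the dataset, prompt label, and qa task. A short sketch checking the decode for the sizes above (D=6, P=3, T=2, so `--array=0-35` spans all 36 combinations):

D, P, T = 6, 3, 2                  # sizes of DATASET_VALUES, PROMPT_TYPE_VALUES, TASK_VALUES
for task_id in (0, 7, 35):         # sample ids from --array=0-35
    idx_d = task_id // (P * T)
    idx_p = (task_id // T) % P
    idx_t = task_id % T
    print(task_id, idx_d, idx_p, idx_t)   # 35 -> (5, 2, 1): last item of each array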
generative/scripts/qa-array-latxa.sh ADDED
@@ -0,0 +1,51 @@
+ #!/usr/bin/env bash
+ #SBATCH --partition=hitz-exclusive
+ #SBATCH --account=hitz-exclusive
+ #SBATCH --job-name=xnli_latxainstruct70
+ #SBATCH --cpus-per-task=2
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --mem=64GB
+ #SBATCH --gres=gpu:2
+ #SBATCH --constraint=a100-sxm4
+ #SBATCH --output=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/logs/xnli-latxainstruct70_%a.log
+ #SBATCH --error=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/logs/xnli-latxainstruct70_%a.err
+ #SBATCH --time=01:00:00 # dd-hh:mm:ss
+ #SBATCH --mail-type=REQUEUE
+ #SBATCH --mail-user=jaione.bengoetxea@ehu.eus
+ #SBATCH --array=0-10%2
+
+ source /scratch/jbengoetxea/phd/.phd_venv_new/bin/activate
+
+ export TRANSFORMERS_CACHE="/scratch/jbengoetxea/.cache"
+
+ # Values
+ DATASET_VALUES=(xnli-eu-biz xnli-eu-gip xnli-eu-naf xnli-eu-nat-biz xnli-eu-nat-gip xnli-eu-nat-naf)
+ PROMPT_TYPE_VALUES=(contradiction entailment neutral)
+ TASK_VALUES=(qa-zero qa-few)
+
+ # Decode SLURM_ARRAY_TASK_ID into the three indices
+ D=${#DATASET_VALUES[@]}
+ P=${#PROMPT_TYPE_VALUES[@]}
+ T=${#TASK_VALUES[@]}
+
+ TASK_ID=$SLURM_ARRAY_TASK_ID
+
+ IDX_D=$((TASK_ID / (P * T)))
+ IDX_P=$(((TASK_ID / T) % P))
+ IDX_T=$((TASK_ID % T))
+
+ DATASET="${DATASET_VALUES[$IDX_D]}"
+ PROMPT_TYPE="${PROMPT_TYPE_VALUES[$IDX_P]}"
+ TASK="${TASK_VALUES[$IDX_T]}"
+
+ # Final values and run script
+ MODEL=latxainstruct70
+ OUTPUT=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/results/$DATASET/$MODEL/$TASK
+
+ python3 /scratch/jbengoetxea/phd/XNLIvar/scripts/generative/scripts/zero_shot.py \
+     --dataset "${DATASET}" \
+     --model $MODEL \
+     --output_dir $OUTPUT \
+     --task $TASK \
+     --prompt_type "${PROMPT_TYPE}"
generative/scripts/qa-array-llama.sh ADDED
@@ -0,0 +1,50 @@
+ #!/usr/bin/env bash
+ #SBATCH --qos=regular
+ #SBATCH --job-name=xnli_llamainstruct70
+ #SBATCH --cpus-per-task=2
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --mem=64GB
+ #SBATCH --gres=gpu:4
+ #SBATCH --constraint=a100-sxm4
+ #SBATCH --output=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/logs/xnli-llamainstruct70_%a.log
+ #SBATCH --error=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/logs/xnli-llamainstruct70_%a.err
+ #SBATCH --time=01:00:00 # dd-hh:mm:ss
+ #SBATCH --mail-type=REQUEUE
+ #SBATCH --mail-user=jaione.bengoetxea@ehu.eus
+ #SBATCH --array=0-10%2
+
+ source /scratch/jbengoetxea/phd/.phd_venv_new/bin/activate
+
+ export TRANSFORMERS_CACHE="/scratch/jbengoetxea/.cache"
+
+ # Values
+ DATASET_VALUES=(xnli-eu-biz xnli-eu-gip xnli-eu-naf xnli-eu-nat-biz xnli-eu-nat-gip xnli-eu-nat-naf)
+ PROMPT_TYPE_VALUES=(contradiction entailment neutral)
+ TASK_VALUES=(qa-zero qa-few)
+
+ # Decode SLURM_ARRAY_TASK_ID into the three indices
+ D=${#DATASET_VALUES[@]}
+ P=${#PROMPT_TYPE_VALUES[@]}
+ T=${#TASK_VALUES[@]}
+
+ TASK_ID=$SLURM_ARRAY_TASK_ID
+
+ IDX_D=$((TASK_ID / (P * T)))
+ IDX_P=$(((TASK_ID / T) % P))
+ IDX_T=$((TASK_ID % T))
+
+ DATASET="${DATASET_VALUES[$IDX_D]}"
+ PROMPT_TYPE="${PROMPT_TYPE_VALUES[$IDX_P]}"
+ TASK="${TASK_VALUES[$IDX_T]}"
+
+ # Final values and run script
+ MODEL=llama3instruct70
+ OUTPUT=/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/results/$DATASET/$MODEL/$TASK
+
+ python3 /scratch/jbengoetxea/phd/XNLIvar/scripts/generative/scripts/zero_shot.py \
+     --dataset "${DATASET}" \
+     --model $MODEL \
+     --output_dir $OUTPUT \
+     --task $TASK \
+     --prompt_type "${PROMPT_TYPE}"
generative/scripts/zero_shot.py ADDED
@@ -0,0 +1,310 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
+ import torch
+ from huggingface_hub import login
+ from sklearn.metrics import accuracy_score
+ import argparse
+ import json
+ import pathlib
+ from typing import List, Dict
+ import pandas as pd
+ import logging
+ from datetime import datetime
+ import os
+
+ MAX_NEW_TOKENS = 5
+ TEMPERATURE = 0.3
+
+ with open("/scratch/jbengoetxea/phd/XNLIvar/scripts/generative/config.json", "r") as f:
+     config = json.load(f)
+
+ def parse_args():
+     parser = argparse.ArgumentParser(
+         description="Run zero-/few-shot NLI inference with a generative model"
+     )
+
+     parser.add_argument(
+         "--dataset",
+         type=str,
+         default=None,
+         required=True,
+         help="Name of the dataset to predict gold_labels",
+         choices=["xnli-eu-native", "xnli-eu-var", "xnli-es-native", "xnli-es-var", "xnli-en", "xnli-es", "xnli-eu", "xnli-es-var-no-rep", "xnli-eu-var-no-rep", "xnli-eu-var-less-biz", "xnli-eu-var-less-gip", "xnli-eu-biz", "xnli-eu-gip", "xnli-eu-naf", "xnli-eu-nat-biz", "xnli-eu-nat-gip", "xnli-eu-nat-naf"]
+     )
+
+     parser.add_argument(
+         "--model",
+         type=str,
+         default=None,
+         required=True,
+         help="Model name in config",
+         choices=["llama3instruct8", "llama3instruct70", "gemmainstruct9", "gemmainstruct27", "latxainstruct70", "llama3base70"]
+     )
+
+     parser.add_argument(
+         "--output_dir",
+         type=str,
+         default=None,
+         required=True,
+         help="Output path to dump predictions"
+     )
+
+     parser.add_argument(
+         "--task",
+         type=str,
+         default=None,
+         required=True,
+         help="Type of task formulation",
+         choices=["binary", "trilabel", "qa-zero", "qa-few"]
+     )
+
+     parser.add_argument(
+         "--prompt_type",
+         type=str,
+         default=None,
+         required=True,
+         help="Type of prompt"
+     )
+
+     parser.add_argument(
+         "--paraphrases",
+         action="store_true",
+         required=False,
+         help="Dataset with paraphrases generated automatically"
+     )
+
+     parser.add_argument(
+         "--paraphrase_source",
+         type=str,
+         default=None,
+         required=False,
+         help="Model used to generate paraphrases"
+     )
+
+     args = parser.parse_args()
+
+     return args
+
+ def load_dataset(data_path: str) -> pd.DataFrame:
+     df = None
+     extension = pathlib.Path(data_path).suffix
+     if extension.endswith("json"):
+         df = pd.read_json(data_path)
+     elif extension.endswith("jsonl"):
+         df = pd.read_json(data_path, lines=True)
+     elif extension.endswith("tsv"):
+         df = pd.read_csv(data_path, sep="\t")
+     else:
+         df = pd.read_csv(data_path)
+
+     return df
+
+ def dump_predictions(out_path: str, premises: List, hypotheses: List, gold_labels: List, predictions: List, paraphrased_sents=None):
+     if paraphrased_sents:
+         with open(out_path, "w") as o:
+             o.write("premise\thypothesis\tgold_label\tprediction\tparaphrased_sentence\n")
+             for p, h, g, pr, paraph in zip(premises, hypotheses, gold_labels, predictions, paraphrased_sents):
+                 o.write(f"{p}\t{h}\t{g}\t{pr}\t{paraph}\n")
+     else:
+         with open(out_path, "w") as o:
+             o.write("premise\thypothesis\tgold_label\tprediction\n")
+             for p, h, g, pr in zip(premises, hypotheses, gold_labels, predictions):
+                 o.write(f"{p}\t{h}\t{g}\t{pr}\n")
+
+     print(f"{len(predictions)} Predictions stored in {out_path}")
+
+ def map_labels(prediction: str, label_mapping: Dict) -> str:
+     predictions_clean = [pred.strip("<>.,") for pred in prediction.lower().split()]
+     for pred in predictions_clean:
+         for label in label_mapping:
+             label_lower = label.lower()
+             # Allow partial matching in both directions
+             if pred in label_lower or label_lower in pred:
+                 return label_mapping[label]
+     return "unk"
+
+ def get_column_values(df, col_id):
+     return df[col_id].tolist()
+
+ def map_labels_to_string(labels: List):
+     # Assumes XNLI integer labels: 0 = entailment, 1 = neutral, anything else = contradiction
+     label_strings = []
+     for label in labels:
+         if label == 0:
+             label_strings.append("entailment")
+         elif label == 1:
+             label_strings.append("neutral")
+         else:
+             label_strings.append("contradiction")
+
+     return label_strings
+
+ def main():
+     args = parse_args()
+
+     if not os.path.exists(args.output_dir):
+         os.makedirs(args.output_dir, exist_ok=True)
+
+     logger_path = os.path.join(args.output_dir, f"{args.prompt_type}_{args.paraphrase_source+'_' if args.paraphrase_source else ''}{datetime.now().strftime('%d-%m-%Y_%H_%M_%S')}.log")
+
+     logger = logging.getLogger(__name__)
+     logging.basicConfig(filename=logger_path, encoding='utf-8', level=logging.INFO)
+
+     # Disable compilation (to avoid recompile_limit errors)
+     #torch._dynamo.disable()
+     #torch._dynamo.config.suppress_errors = True
+     #torch._dynamo.config.recompile_limit = 100
+
+     login(token='LOGIN_TOKEN') # Add hf login token
+     model_id = config.get("models", {}).get(args.model, "")
+     logger.info(f"Model used: {model_id}")
+     logger.info(f"Prompt task: {args.task}")
+     logger.info(f"Dataset with paraphrases: {args.paraphrases}")
+     logger.info(f"Prompt config: {args.prompt_type}")
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+     logger.info(f"Device in use: {device}")
+
+     datasets_config = config.get("datasets", {})
+     prompt_config = config.get("prompts", {}).get(args.task, {})
+
+     logger.debug(f"Task: {args.task}, prompts available: {datasets_config.get(args.dataset, {}).get('prompts', [])}")
+
+     # Ensure the requested task formulation is configured for this dataset
+     assert args.task in datasets_config.get(args.dataset, {}).get("prompts", [])
+
+     if args.paraphrases:
+         data_path = datasets_config.get(args.dataset, {}).get("data_path_paraphrase", "")
+     else:
+         data_path = datasets_config.get(args.dataset, {}).get("data_path", "")
+
+     logger.info(f"Dataset loaded from: {data_path}")
+     df = load_dataset(data_path)
+     logger.info(f"Loaded samples: {len(df)}")
+     premises = get_column_values(df, datasets_config.get(args.dataset, {}).get("prem_col", ""))
+     hypotheses = get_column_values(df, datasets_config.get(args.dataset, {}).get("hyp_col", ""))
+     if args.paraphrases:
+         gold_labels = get_column_values(df, "gold_label")
+     else:
+         gold_labels = get_column_values(df, datasets_config.get(args.dataset, {}).get("label_col", ""))
+
+     gold_labels = map_labels_to_string(gold_labels)
+     logger.debug(f"Gold labels (first 5): {gold_labels[:5]}")
+
+     labels = list(set(gold_labels))
+
+     set_seed(5)
+
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16)
+
+     tokenizer.pad_token_id = tokenizer.eos_token_id
+
+     predictions = []
+     for p, h, l in zip(premises, hypotheses, gold_labels):
+         preffix_prompt = prompt_config.get(args.prompt_type, {}).get("preffix", "")
+         if args.prompt_type == "chain":
+             prompt = preffix_prompt + f"\n Premise: {p}\n Hypothesis: {h}\n Answer: "
+         else:
+             prompt = preffix_prompt + f" {p} -> {h}: "
+         logger.info(f"Prompt: {prompt}")
+
+         label_mappings = prompt_config.get(args.prompt_type, {}).get("label_mapping")
+         logger.info(f"Label mappings: {label_mappings}")
+
+         inputs = tokenizer([prompt], return_tensors="pt").to(device)
+
+         logger.info(f"{p}\t{h}\t{l}")
+
+         # NOTE: temperature only takes effect when do_sample=True; generation here is greedy by default.
+         outputs = model.generate(**inputs, max_new_tokens=MAX_NEW_TOKENS, return_dict_in_generate=True, output_scores=True, temperature=TEMPERATURE)
+
+         transition_scores = model.compute_transition_scores(
+             outputs.sequences, outputs.scores, normalize_logits=True
+         )
+         logger.info(f"{outputs.sequences}\t{outputs.scores}")
+
+         # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for
+         # encoder-decoder models, like BART or T5.
+         input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1]
+         generated_tokens = outputs.sequences[:, input_length:]
+         for tok, score in zip(generated_tokens[0], transition_scores[0]):
+             # | token | token string | log probability
+             logger.info(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score}")
+
+         answers = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
+         logger.info(f"Answers: {answers}, split: {answers.split()}")
+         logger.info(f"Mapped label: {map_labels(answers, label_mappings)}")
+         predictions.append(map_labels(answers, label_mappings))
+         logger.info("Label added to predictions.")
+
+     logger.debug(f"Gold sample: {gold_labels[:5]}, prediction sample: {predictions[:5]}")
+     assert len(gold_labels) == len(predictions)
+     logger.info(f"Gold: {len(gold_labels)}, Pred: {len(predictions)}")
+
+     predictions_path = os.path.join(args.output_dir, f"{args.prompt_type}_{args.paraphrase_source+'_' if args.paraphrase_source else ''}{datetime.now().strftime('%d-%m-%Y_%H_%M_%S')}.tsv")
+
+     if args.paraphrases:
+         paraphrased_sents = df.iloc[:, -1].tolist()
+         logger.info(f"Dumping predictions with paraphrased sentences, met location: {list(df.columns)[-1]}")
+         dump_predictions(predictions_path, premises, hypotheses, gold_labels, predictions, paraphrased_sents)
+     else:
+         dump_predictions(predictions_path, premises, hypotheses, gold_labels, predictions)
+
+     logger.info(f"Predictions dumped to {predictions_path}")
+
+     accuracy = accuracy_score(gold_labels, predictions, normalize=True)
+     logger.info(f"Accuracy ({len(gold_labels)} gold, {len(predictions)} pred): {accuracy}\n")
+
+ if __name__ == "__main__":
+     main()
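
The label extraction in map_labels is deliberately permissive: it lowercases the generation, strips <>., from each whitespace-separated token, and accepts substring matches in either direction, falling back to "unk". A quick sketch of that behavior on plausible model outputs (synthetic strings, reusing map_labels as defined above; the mapping mirrors the trilabel config):

mapping = {"entailment": "entailment", "contradiction": "contradiction", "neutral": "neutral"}

print(map_labels("<entailment>", mapping))      # 'entailment'
print(map_labels("Answer: neutral.", mapping))  # 'neutral' ("neutral." is stripped to "neutral")
print(map_labels("yes", mapping))               # 'unk' (no substring match in either direction)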