dataset: codah
subset: fold_3
templates:
  15861c29-a2f1-4165-8849-83b88320fc3d: !Template
    answer_choices: null
    id: 15861c29-a2f1-4165-8849-83b88320fc3d
    jinja: '{{question_propmt}}

      Candidate answers:

      - {{ candidate_answers | join("\n- ") }}

      Finish the sentence with the correct answer

      |||

      {{candidate_answers[correct_answer_idx]}}'
    metadata: !TemplateMetadata
      choices_in_prompt: null
      metrics: []
      original_task: true
    name: finish_from_the_list_post
    reference: ''
  9ab1a3e6-6c03-4c37-9a85-d8128dc92545: !Template
    answer_choices: null
    id: 9ab1a3e6-6c03-4c37-9a85-d8128dc92545
    jinja: '{{question_propmt}}

      Candidate answers:

      - {{ candidate_answers | join("\n- ") }}

      |||

      {{candidate_answers[correct_answer_idx]}}'
    metadata: !TemplateMetadata
      choices_in_prompt: null
      metrics: []
      original_task: true
    name: answer_with_option
    reference: ''
  9efbda8e-19f8-47fb-907a-d19c660b0ab8: !Template
    answer_choices: null
    id: 9efbda8e-19f8-47fb-907a-d19c660b0ab8
    jinja: 'Finish the following text:

      {{question_propmt}}

      |||

      {{candidate_answers[correct_answer_idx]}}'
    metadata: !TemplateMetadata
      choices_in_prompt: null
      metrics: []
      original_task: null
    name: finish_pre
    reference: ''
  9fecf40b-f96f-4124-80b0-038d5e58784c: !Template
    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
      ||| Others
    id: 9fecf40b-f96f-4124-80b0-038d5e58784c
    jinja: "{{question_propmt}}\n\nPossible categories for this sentence are {{',\
      \ '.join(answer_choices)}}.\n\nFrom which category does the sentence belong?\
      \ \n|||\n{{answer_choices[question_category]}}"
    metadata: !TemplateMetadata
      choices_in_prompt: null
      metrics: []
      original_task: null
    name: question_category
    reference: ''
  a53e444c-da0d-4159-8488-35858b239d3d: !Template
    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
      ||| Others
    id: a53e444c-da0d-4159-8488-35858b239d3d
    jinja: '{{question_propmt}}

      Which of {{'', ''.join(["Idioms", "Reference", "Polysemy", "Negation", "Quantitative",
      " or Others"])}} best describes the text?

      |||

      {{answer_choices[question_category]}}'
    metadata: !TemplateMetadata
      choices_in_prompt: null
      metrics: []
      original_task: null
    name: question_category_bis
    reference: ''
  ce98e4d9-7eca-4101-8299-fb074b52d279: !Template
    answer_choices: null
    id: ce98e4d9-7eca-4101-8299-fb074b52d279
    jinja: '{{question_propmt}}

      {% for k in range(candidate_answers | length) %}

      {{'' -> ''.join([(k + 1) | string, candidate_answers[k]])}}

      {% endfor %}

      Is the right answer {{"1, 2, 3 or 4"}} ?

      |||

      {{correct_answer_idx}}'
    metadata: !TemplateMetadata
      choices_in_prompt: null
      metrics: []
      original_task: true
    name: answer_with_option_idx
    reference: ''
  dd7a60fc-bec9-473b-b00d-f52c31c30b1c: !Template
    answer_choices: null
    id: dd7a60fc-bec9-473b-b00d-f52c31c30b1c
    jinja: '{{question_propmt}}

      Finish the sentence

      |||

      {{candidate_answers[correct_answer_idx]}}'
    metadata: !TemplateMetadata
      choices_in_prompt: null
      metrics: []
      original_task: null
    name: answer_no_option
    reference: ''
  eb354069-d3ae-4707-b355-6a15b709e454: !Template
    answer_choices: null
    id: eb354069-d3ae-4707-b355-6a15b709e454
    jinja: '{{question_propmt}}

      Finish the sentence using one of the following endings:

      - {{ candidate_answers | join("\n- ") }}

      |||

      {{candidate_answers[correct_answer_idx]}}'
    metadata: !TemplateMetadata
      choices_in_prompt: null
      metrics: []
      original_task: true
    name: finish_from_the_list
    reference: ''
  f0fe6482-d937-42ee-bb71-72c8e8ffdf7e: !Template
    answer_choices: null
    id: f0fe6482-d937-42ee-bb71-72c8e8ffdf7e
    jinja: '{{question_propmt}}

      - {{ candidate_answers | join("\n- ") }}

      Which is the correct ending?

      |||

      {{candidate_answers[correct_answer_idx]}}'
    metadata: !TemplateMetadata
      choices_in_prompt: null
      metrics: []
      original_task: true
    name: answer_with_option_post
    reference: ''
  fc1c9b44-e4ef-4ab1-adc2-496fe97ac01e: !Template
    answer_choices: null
    id: fc1c9b44-e4ef-4ab1-adc2-496fe97ac01e
    jinja: '{{question_propmt}}

      Choose a candidate ending from this list:

      - {{ candidate_answers | join("\n- ") }}

      |||

      {{candidate_answers[correct_answer_idx]}}'
    metadata: !TemplateMetadata
      choices_in_prompt: null
      metrics: []
      original_task: true
    name: choose_from_list
    reference: ''
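# Usage note (comment only, not part of the template data): a minimal sketch of
# how a templates file like this is typically consumed via the promptsource
# library together with the Hugging Face `codah` dataset. The DatasetTemplates
# constructor/lookup calls and the "train" split below are assumptions about
# the promptsource API and dataset layout, kept here purely for illustration.
#
#   from datasets import load_dataset
#   from promptsource.templates import DatasetTemplates
#
#   # Load one CODAH fold and take a single example (assumed split name).
#   example = load_dataset("codah", "fold_3", split="train")[0]
#
#   # Load the templates defined above and select one by its `name` field.
#   templates = DatasetTemplates("codah", "fold_3")
#   prompt = templates["finish_from_the_list_post"]
#
#   # Rendering fills the Jinja template; the text before/after "|||" becomes
#   # the model input and the expected target, respectively.
#   result = prompt.apply(example)
#   print("INPUT:", result[0])
#   print("TARGET:", result[1])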