diff --git a/README.md b/README.md index 8bab298a1a09b5513aca562cdfd31223bd2457b5..152982bce01656992e12b462d48143f09e3c23c2 100644 --- a/README.md +++ b/README.md @@ -1,78 +1,404 @@ + --- -language: - - en -tags: -- question generation -license: mit -datasets: -- asahi417/qg_squad +license: cc-by-4.0 metrics: -- bleu +- bleu4 - meteor -- rouge +- rouge-l - bertscore - moverscore +language: en +datasets: +- lmqg/qg_squad +pipeline_tag: text2text-generation +tags: +- question generation widget: -- text: " Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records." - example_title: "Example 1" -- text: "Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records." - example_title: "Example 2" -- text: "Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records ." - example_title: "Example 3" +- text: "generate question: Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records." + example_title: "Question Generation Example 1" +- text: "generate question: Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records." + example_title: "Question Generation Example 2" +- text: "generate question: Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records ." + example_title: "Question Generation Example 3" +model-index: +- name: lmqg/bart-large-squad + results: + - task: + name: Text2text Generation + type: text2text-generation + dataset: + name: lmqg/qg_squad + type: default + args: default + metrics: + - name: BLEU4 + type: bleu4 + value: 0.26168385362299557 + - name: ROUGE-L + type: rouge-l + value: 0.5384959163821219 + - name: METEOR + type: meteor + value: 0.27073122286541956 + - name: BERTScore + type: bertscore + value: 0.9100413219045603 + - name: MoverScore + type: moverscore + value: 0.6499011626820898 + - task: + name: Text2text Generation + type: text2text-generation + dataset: + name: lmqg/qg_squadshifts + type: reddit + args: reddit + metrics: + - name: BLEU4 + type: bleu4 + value: 0.059525104157825456 + - name: ROUGE-L + type: rouge-l + value: 0.22365090580055863 + - name: METEOR + type: meteor + value: 0.21499800504546457 + - name: BERTScore + type: bertscore + value: 0.9095144685254328 + - name: MoverScore + type: moverscore + value: 0.6059332247878408 + - task: + name: Text2text Generation + type: text2text-generation + dataset: + name: lmqg/qg_squadshifts + type: new_wiki + args: new_wiki + metrics: + - name: BLEU4 + type: bleu4 + value: 0.11118273173452982 + - name: ROUGE-L + type: rouge-l + value: 0.2967546690273089 + - name: METEOR + type: meteor + value: 0.27315087810722966 + - name: BERTScore + type: bertscore + value: 0.9322739617807421 + - name: MoverScore + type: moverscore + value: 0.6623000084761579 + - task: + name: Text2text Generation + type: text2text-generation + dataset: + name: lmqg/qg_subjqa + type: tripadvisor + args: tripadvisor + metrics: + - name: BLEU4 + type: bleu4 + value: 8.380171318718442e-07 + - name: ROUGE-L + type: rouge-l + value: 0.1402922852924756 + - name: METEOR + type: meteor + value: 0.1372146070365174 + - name: BERTScore + type: bertscore + value: 0.8891002409937424 + - name: MoverScore + type: moverscore + value: 0.5604572211470809 + - task: + name: 
Text2text Generation + type: text2text-generation + dataset: + name: lmqg/qg_squadshifts + type: default + args: default + metrics: + - name: BLEU4 + type: bleu4 + value: 0.07839941048417529 + - name: ROUGE-L + type: rouge-l + value: 0.25357667226247294 + - name: METEOR + type: meteor + value: 0.24046838149047955 + - name: BERTScore + type: bertscore + value: 0.9182198703598111 + - name: MoverScore + type: moverscore + value: 0.6274693859765924 + - task: + name: Text2text Generation + type: text2text-generation + dataset: + name: lmqg/qg_squadshifts + type: nyt + args: nyt + metrics: + - name: BLEU4 + type: bleu4 + value: 0.08117757543966063 + - name: ROUGE-L + type: rouge-l + value: 0.25292097720734297 + - name: METEOR + type: meteor + value: 0.25254205113198686 + - name: BERTScore + type: bertscore + value: 0.9249009759439454 + - name: MoverScore + type: moverscore + value: 0.6406329128556304 + - task: + name: Text2text Generation + type: text2text-generation + dataset: + name: lmqg/qg_subjqa + type: restaurants + args: restaurants + metrics: + - name: BLEU4 + type: bleu4 + value: 1.1301750984972448e-06 + - name: ROUGE-L + type: rouge-l + value: 0.13083168975354642 + - name: METEOR + type: meteor + value: 0.12419733006916912 + - name: BERTScore + type: bertscore + value: 0.8797711839570719 + - name: MoverScore + type: moverscore + value: 0.5542757411268555 + - task: + name: Text2text Generation + type: text2text-generation + dataset: + name: lmqg/qg_subjqa + type: electronics + args: electronics + metrics: + - name: BLEU4 + type: bleu4 + value: 0.00866799444965211 + - name: ROUGE-L + type: rouge-l + value: 0.1601628874804186 + - name: METEOR + type: meteor + value: 0.15348605312210778 + - name: BERTScore + type: bertscore + value: 0.8783386920680519 + - name: MoverScore + type: moverscore + value: 0.5634845371093992 + - task: + name: Text2text Generation + type: text2text-generation + dataset: + name: lmqg/qg_subjqa + type: books + args: books + metrics: + - name: BLEU4 + type: bleu4 + value: 0.006278914808207679 + - name: ROUGE-L + type: rouge-l + value: 0.12368226019088967 + - name: METEOR + type: meteor + value: 0.11576293675813865 + - name: BERTScore + type: bertscore + value: 0.8807110440044503 + - name: MoverScore + type: moverscore + value: 0.5555905941686486 + - task: + name: Text2text Generation + type: text2text-generation + dataset: + name: lmqg/qg_subjqa + type: movies + args: movies + metrics: + - name: BLEU4 + type: bleu4 + value: 1.0121579426501661e-06 + - name: ROUGE-L + type: rouge-l + value: 0.12508697028506718 + - name: METEOR + type: meteor + value: 0.11862284941640638 + - name: BERTScore + type: bertscore + value: 0.8748829724726739 + - name: MoverScore + type: moverscore + value: 0.5528899173535703 + - task: + name: Text2text Generation + type: text2text-generation + dataset: + name: lmqg/qg_subjqa + type: grocery + args: grocery + metrics: + - name: BLEU4 + type: bleu4 + value: 0.00528043272450429 + - name: ROUGE-L + type: rouge-l + value: 0.12343711316491492 + - name: METEOR + type: meteor + value: 0.15133496445452477 + - name: BERTScore + type: bertscore + value: 0.8778951253890991 + - name: MoverScore + type: moverscore + value: 0.5701949938103265 + - task: + name: Text2text Generation + type: text2text-generation + dataset: + name: lmqg/qg_squadshifts + type: amazon + args: amazon + metrics: + - name: BLEU4 + type: bleu4 + value: 0.06530369842068952 + - name: ROUGE-L + type: rouge-l + value: 0.25030985091008146 + - name: METEOR + type: meteor + value: 
0.2229994442645732 + - name: BERTScore + type: bertscore + value: 0.9092814804525936 + - name: MoverScore + type: moverscore + value: 0.6086538514008419 + - task: + name: Text2text Generation + type: text2text-generation + dataset: + name: lmqg/qg_subjqa + type: default + args: default + metrics: + - name: BLEU4 + type: bleu4 + value: 0.005121882223046874 + - name: ROUGE-L + type: rouge-l + value: 0.1346485324169255 + - name: METEOR + type: meteor + value: 0.13733272662214893 + - name: BERTScore + type: bertscore + value: 0.8811488576438816 + - name: MoverScore + type: moverscore + value: 0.5614233235005509 --- -# BART LARGE fine-tuned for English Question Generation -BART LARGE Model fine-tuned on English question generation dataset (SQuAD) with an extensive hyper-parameter search. -- [Online Demo](https://autoqg.net/) -- [Project Repository](https://github.com/asahi417/lm-question-generation) +# Language Models Fine-tuning on Question Generation: `lmqg/bart-large-squad` +This model is a fine-tuned version of [facebook/bart-large](https://huggingface.co/facebook/bart-large) for the question generation task on the +[lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (dataset_name: default). -## Overview -**Language model:** facebook/bart-large -**Language:** English (en) -**Downstream-task:** Question Generation -**Training data:** SQuAD -**Eval data:** SQuAD -**Code:** See [our repository](https://github.com/asahi417/lm-question-generation) +### Overview +- **Language model:** [facebook/bart-large](https://huggingface.co/facebook/bart-large) +- **Language:** en +- **Training data:** [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (default) +- **Online Demo:** [https://autoqg.net/](https://autoqg.net/) +- **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation) +- **Paper:** [TBA](TBA) -## Usage -### In Transformers +### Usage ```python + from transformers import pipeline -model_path = 'asahi417/lmqg-bart-large-squad' +model_path = 'lmqg/bart-large-squad' pipe = pipeline("text2text-generation", model_path) -paragraph = 'Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records.' -# highlight an answer in the paragraph to generate question -answer = 'Etta James' -highlight_token = '' -input_text = paragraph.replace(answer, '{0} {1} {0}'.format(highlight_token, answer)) -input_text = 'generate question: {}'.format(input_text) # add task specific prefix -generation = pipe(input_text) -print(generation) ->>> [{'generated_text': 'What is the name of the biopic that Beyonce starred in?'}] +# Question Generation +input_text = 'generate question: Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records.' +question = pipe(input_text) ``` -## Evaluations +## Evaluation Metrics -Evaluation on the test set of [SQuAD QG dataset](https://huggingface.co/datasets/asahi417/qg_squad). -The results are comparable with the [leaderboard](https://paperswithcode.com/sota/question-generation-on-squad11) and previous works. -All evaluations were done using our [evaluation script](https://github.com/asahi417/lm-question-generation).
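+
+For illustration, scores like those in the tables below can be approximated with the Hugging Face `datasets` and `evaluate` packages. This is only a rough sketch, not the evaluation script used for the reported numbers, and it assumes the `paragraph_answer` and `question` fields named in the trainer configuration further down.
+
+```python
+# Rough sketch only: not the evaluation script behind the reported scores.
+# Assumes lmqg/qg_squad exposes `paragraph_answer` (model input) and `question`
+# (reference) fields, as the trainer configuration below suggests.
+import evaluate
+from datasets import load_dataset
+from transformers import pipeline
+
+pipe = pipeline("text2text-generation", "lmqg/bart-large-squad")
+test = load_dataset("lmqg/qg_squad", split="test").select(range(8))  # small sample
+
+# prefix_types is None in the trainer config, so the paragraph_answer text is passed as-is
+predictions = [pipe(x)[0]["generated_text"] for x in test["paragraph_answer"]]
+references = [[q] for q in test["question"]]
+
+bleu = evaluate.load("bleu")  # BLEU with max n-gram order 4, i.e. BLEU4
+print(bleu.compute(predictions=predictions, references=references)["bleu"])
+```
+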
+### Metrics -| BLEU 4 | ROUGE L | METEOR | BERTScore | MoverScore | -| ------ | -------- | ------ | --------- | ---------- | -| 26.16 | 53.84 | 27.07 | 91.00 | 64.99 | +| Dataset | Type | BLEU4 | ROUGE-L | METEOR | BERTScore | MoverScore | Link | +|:--------|:-----|------:|--------:|-------:|----------:|-----------:|-----:| +| [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | default | 0.26168385362299557 | 0.5384959163821219 | 0.27073122286541956 | 0.9100413219045603 | 0.6499011626820898 | [link](https://huggingface.co/lmqg/bart-large-squad/raw/main/eval/metric.first.sentence.paragraph_answer.question.lmqg_qg_squad.default.json) | -- [metric file](https://huggingface.co/asahi417/lmqg-bart-large-squad/raw/main/eval/metric.first.sentence.paragraph_answer.question.asahi417_qg_squad.default.json) -## Fine-tuning Parameters -We ran grid search to find the best hyper-parameters and continued fine-tuning until the validation metric decrease. -The best hyper-parameters can be found [here](https://huggingface.co/asahi417/lmqg-bart-large-squad/raw/main/trainer_config.json), and fine-tuning script is released in [our repository](https://github.com/asahi417/lm-question-generation). +### Out-of-domain Metrics + +| Dataset | Type | BLEU4 | ROUGE-L | METEOR | BERTScore | MoverScore | Link | +|:--------|:-----|------:|--------:|-------:|----------:|-----------:|-----:| +| [lmqg/qg_squadshifts](https://huggingface.co/datasets/lmqg/qg_squadshifts) | reddit | 0.059525104157825456 | 0.22365090580055863 | 0.21499800504546457 | 0.9095144685254328 | 0.6059332247878408 | [link](https://huggingface.co/lmqg/bart-large-squad/raw/main/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.reddit.json) | +| [lmqg/qg_squadshifts](https://huggingface.co/datasets/lmqg/qg_squadshifts) | new_wiki | 0.11118273173452982 | 0.2967546690273089 | 0.27315087810722966 | 0.9322739617807421 | 0.6623000084761579 | [link](https://huggingface.co/lmqg/bart-large-squad/raw/main/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.new_wiki.json) | +| [lmqg/qg_subjqa](https://huggingface.co/datasets/lmqg/qg_subjqa) | tripadvisor | 8.380171318718442e-07 | 0.1402922852924756 | 0.1372146070365174 | 0.8891002409937424 | 0.5604572211470809 | [link](https://huggingface.co/lmqg/bart-large-squad/raw/main/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.tripadvisor.json) | +| [lmqg/qg_squadshifts](https://huggingface.co/datasets/lmqg/qg_squadshifts) | default | 0.07839941048417529 | 0.25357667226247294 | 0.24046838149047955 | 0.9182198703598111 | 0.6274693859765924 | [link](https://huggingface.co/lmqg/bart-large-squad/raw/main/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.default.json) | +| [lmqg/qg_squadshifts](https://huggingface.co/datasets/lmqg/qg_squadshifts) | nyt | 0.08117757543966063 | 0.25292097720734297 | 0.25254205113198686 | 0.9249009759439454 | 0.6406329128556304 | [link](https://huggingface.co/lmqg/bart-large-squad/raw/main/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.nyt.json) | +| [lmqg/qg_subjqa](https://huggingface.co/datasets/lmqg/qg_subjqa) | restaurants | 1.1301750984972448e-06 | 0.13083168975354642 | 0.12419733006916912 | 0.8797711839570719 | 0.5542757411268555 | [link](https://huggingface.co/lmqg/bart-large-squad/raw/main/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.restaurants.json) | +| [lmqg/qg_subjqa](https://huggingface.co/datasets/lmqg/qg_subjqa) 
| electronics | 0.00866799444965211 | 0.1601628874804186 | 0.15348605312210778 | 0.8783386920680519 | 0.5634845371093992 | [link](https://huggingface.co/lmqg/bart-large-squad/raw/main/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.electronics.json) | +| [lmqg/qg_subjqa](https://huggingface.co/datasets/lmqg/qg_subjqa) | books | 0.006278914808207679 | 0.12368226019088967 | 0.11576293675813865 | 0.8807110440044503 | 0.5555905941686486 | [link](https://huggingface.co/lmqg/bart-large-squad/raw/main/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.books.json) | +| [lmqg/qg_subjqa](https://huggingface.co/datasets/lmqg/qg_subjqa) | movies | 1.0121579426501661e-06 | 0.12508697028506718 | 0.11862284941640638 | 0.8748829724726739 | 0.5528899173535703 | [link](https://huggingface.co/lmqg/bart-large-squad/raw/main/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.movies.json) | +| [lmqg/qg_subjqa](https://huggingface.co/datasets/lmqg/qg_subjqa) | grocery | 0.00528043272450429 | 0.12343711316491492 | 0.15133496445452477 | 0.8778951253890991 | 0.5701949938103265 | [link](https://huggingface.co/lmqg/bart-large-squad/raw/main/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.grocery.json) | +| [lmqg/qg_squadshifts](https://huggingface.co/datasets/lmqg/qg_squadshifts) | amazon | 0.06530369842068952 | 0.25030985091008146 | 0.2229994442645732 | 0.9092814804525936 | 0.6086538514008419 | [link](https://huggingface.co/lmqg/bart-large-squad/raw/main/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.amazon.json) | +| [lmqg/qg_subjqa](https://huggingface.co/datasets/lmqg/qg_subjqa) | default | 0.005121882223046874 | 0.1346485324169255 | 0.13733272662214893 | 0.8811488576438816 | 0.5614233235005509 | [link](https://huggingface.co/lmqg/bart-large-squad/raw/main/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.default.json) | -## Citation -TBA +## Training hyperparameters + +The following hyperparameters were used during fine-tuning: + - dataset_path: lmqg/qg_squad + - dataset_name: default + - input_types: ['paragraph_answer'] + - output_types: ['question'] + - prefix_types: None + - model: facebook/bart-large + - max_length: 512 + - max_length_output: 32 + - epoch: 4 + - batch: 32 + - lr: 5e-05 + - fp16: False + - random_seed: 1 + - gradient_accumulation_steps: 4 + - label_smoothing: 0.15 +The full configuration can be found at [fine-tuning config file](https://huggingface.co/lmqg/bart-large-squad/raw/main/trainer_config.json). 
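+
+The same values can also be read programmatically from the published configuration; a minimal sketch, assuming the `huggingface_hub` package is installed:
+
+```python
+# Minimal sketch: download and inspect the published fine-tuning configuration.
+import json
+from huggingface_hub import hf_hub_download
+
+config_path = hf_hub_download(repo_id="lmqg/bart-large-squad", filename="trainer_config.json")
+with open(config_path) as f:
+    trainer_config = json.load(f)
+
+print(trainer_config["model"], trainer_config["lr"], trainer_config["epoch"])
+```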
+ +## Citation +TBA diff --git a/eval/metric.first.answer.paragraph_answer.question.asahi417_qg_squad.default.json b/eval/metric.first.answer.paragraph_answer.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.first.answer.paragraph_answer.question.asahi417_qg_squad.default.json rename to eval/metric.first.answer.paragraph_answer.question.lmqg_qg_squad.default.json diff --git a/eval/metric.first.answer.paragraph_sentence.question.asahi417_qg_squad.default.json b/eval/metric.first.answer.paragraph_sentence.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.first.answer.paragraph_sentence.question.asahi417_qg_squad.default.json rename to eval/metric.first.answer.paragraph_sentence.question.lmqg_qg_squad.default.json diff --git a/eval/metric.first.answer.sentence_answer.question.asahi417_qg_squad.default.json b/eval/metric.first.answer.sentence_answer.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.first.answer.sentence_answer.question.asahi417_qg_squad.default.json rename to eval/metric.first.answer.sentence_answer.question.lmqg_qg_squad.default.json diff --git a/eval/metric.first.sentence.paragraph_answer.question.asahi417_qg_squad.default.json b/eval/metric.first.sentence.paragraph_answer.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.first.sentence.paragraph_answer.question.asahi417_qg_squad.default.json rename to eval/metric.first.sentence.paragraph_answer.question.lmqg_qg_squad.default.json diff --git a/eval/metric.first.sentence.paragraph_sentence.question.asahi417_qg_squad.default.json b/eval/metric.first.sentence.paragraph_sentence.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.first.sentence.paragraph_sentence.question.asahi417_qg_squad.default.json rename to eval/metric.first.sentence.paragraph_sentence.question.lmqg_qg_squad.default.json diff --git a/eval/metric.first.sentence.sentence_answer.question.asahi417_qg_squad.default.json b/eval/metric.first.sentence.sentence_answer.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.first.sentence.sentence_answer.question.asahi417_qg_squad.default.json rename to eval/metric.first.sentence.sentence_answer.question.lmqg_qg_squad.default.json diff --git a/eval/metric.last.sentence.paragraph_answer.question.asahi417_qg_squad.default.json b/eval/metric.last.sentence.paragraph_answer.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.last.sentence.paragraph_answer.question.asahi417_qg_squad.default.json rename to eval/metric.last.sentence.paragraph_answer.question.lmqg_qg_squad.default.json diff --git a/eval/metric.last.sentence.paragraph_sentence.question.asahi417_qg_squad.default.json b/eval/metric.last.sentence.paragraph_sentence.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.last.sentence.paragraph_sentence.question.asahi417_qg_squad.default.json rename to eval/metric.last.sentence.paragraph_sentence.question.lmqg_qg_squad.default.json diff --git a/eval/metric.last.sentence.sentence_answer.question.asahi417_qg_squad.default.json b/eval/metric.last.sentence.sentence_answer.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.last.sentence.sentence_answer.question.asahi417_qg_squad.default.json rename to eval/metric.last.sentence.sentence_answer.question.lmqg_qg_squad.default.json diff --git 
a/eval/metric.long.sentence.paragraph_answer.question.asahi417_qg_squad.default.json b/eval/metric.long.sentence.paragraph_answer.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.long.sentence.paragraph_answer.question.asahi417_qg_squad.default.json rename to eval/metric.long.sentence.paragraph_answer.question.lmqg_qg_squad.default.json diff --git a/eval/metric.long.sentence.paragraph_sentence.question.asahi417_qg_squad.default.json b/eval/metric.long.sentence.paragraph_sentence.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.long.sentence.paragraph_sentence.question.asahi417_qg_squad.default.json rename to eval/metric.long.sentence.paragraph_sentence.question.lmqg_qg_squad.default.json diff --git a/eval/metric.long.sentence.sentence_answer.question.asahi417_qg_squad.default.json b/eval/metric.long.sentence.sentence_answer.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.long.sentence.sentence_answer.question.asahi417_qg_squad.default.json rename to eval/metric.long.sentence.sentence_answer.question.lmqg_qg_squad.default.json diff --git a/eval/metric.middle.sentence.paragraph_answer.question.asahi417_qg_squad.default.json b/eval/metric.middle.sentence.paragraph_answer.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.middle.sentence.paragraph_answer.question.asahi417_qg_squad.default.json rename to eval/metric.middle.sentence.paragraph_answer.question.lmqg_qg_squad.default.json diff --git a/eval/metric.middle.sentence.paragraph_sentence.question.asahi417_qg_squad.default.json b/eval/metric.middle.sentence.paragraph_sentence.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.middle.sentence.paragraph_sentence.question.asahi417_qg_squad.default.json rename to eval/metric.middle.sentence.paragraph_sentence.question.lmqg_qg_squad.default.json diff --git a/eval/metric.middle.sentence.sentence_answer.question.asahi417_qg_squad.default.json b/eval/metric.middle.sentence.sentence_answer.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.middle.sentence.sentence_answer.question.asahi417_qg_squad.default.json rename to eval/metric.middle.sentence.sentence_answer.question.lmqg_qg_squad.default.json diff --git a/eval/metric.short.sentence.paragraph_answer.question.asahi417_qg_squad.default.json b/eval/metric.short.sentence.paragraph_answer.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.short.sentence.paragraph_answer.question.asahi417_qg_squad.default.json rename to eval/metric.short.sentence.paragraph_answer.question.lmqg_qg_squad.default.json diff --git a/eval/metric.short.sentence.paragraph_sentence.question.asahi417_qg_squad.default.json b/eval/metric.short.sentence.paragraph_sentence.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.short.sentence.paragraph_sentence.question.asahi417_qg_squad.default.json rename to eval/metric.short.sentence.paragraph_sentence.question.lmqg_qg_squad.default.json diff --git a/eval/metric.short.sentence.sentence_answer.question.asahi417_qg_squad.default.json b/eval/metric.short.sentence.sentence_answer.question.lmqg_qg_squad.default.json similarity index 100% rename from eval/metric.short.sentence.sentence_answer.question.asahi417_qg_squad.default.json rename to eval/metric.short.sentence.sentence_answer.question.lmqg_qg_squad.default.json diff --git 
a/eval/samples.test.hyp.paragraph_answer.question.asahi417_qg_squad.default.txt b/eval/samples.test.hyp.paragraph_answer.question.lmqg_qg_squad.default.txt similarity index 100% rename from eval/samples.test.hyp.paragraph_answer.question.asahi417_qg_squad.default.txt rename to eval/samples.test.hyp.paragraph_answer.question.lmqg_qg_squad.default.txt diff --git a/eval/samples.test.hyp.paragraph_sentence.question.asahi417_qg_squad.default.txt b/eval/samples.test.hyp.paragraph_sentence.question.lmqg_qg_squad.default.txt similarity index 100% rename from eval/samples.test.hyp.paragraph_sentence.question.asahi417_qg_squad.default.txt rename to eval/samples.test.hyp.paragraph_sentence.question.lmqg_qg_squad.default.txt diff --git a/eval/samples.test.hyp.sentence_answer.question.asahi417_qg_squad.default.txt b/eval/samples.test.hyp.sentence_answer.question.lmqg_qg_squad.default.txt similarity index 100% rename from eval/samples.test.hyp.sentence_answer.question.asahi417_qg_squad.default.txt rename to eval/samples.test.hyp.sentence_answer.question.lmqg_qg_squad.default.txt diff --git a/eval/samples.validation.hyp.paragraph_answer.question.asahi417_qg_squad.default.txt b/eval/samples.validation.hyp.paragraph_answer.question.lmqg_qg_squad.default.txt similarity index 100% rename from eval/samples.validation.hyp.paragraph_answer.question.asahi417_qg_squad.default.txt rename to eval/samples.validation.hyp.paragraph_answer.question.lmqg_qg_squad.default.txt diff --git a/eval/samples.validation.hyp.paragraph_sentence.question.asahi417_qg_squad.default.txt b/eval/samples.validation.hyp.paragraph_sentence.question.lmqg_qg_squad.default.txt similarity index 100% rename from eval/samples.validation.hyp.paragraph_sentence.question.asahi417_qg_squad.default.txt rename to eval/samples.validation.hyp.paragraph_sentence.question.lmqg_qg_squad.default.txt diff --git a/eval/samples.validation.hyp.sentence_answer.question.asahi417_qg_squad.default.txt b/eval/samples.validation.hyp.sentence_answer.question.lmqg_qg_squad.default.txt similarity index 100% rename from eval/samples.validation.hyp.sentence_answer.question.asahi417_qg_squad.default.txt rename to eval/samples.validation.hyp.sentence_answer.question.lmqg_qg_squad.default.txt diff --git a/eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_squadshifts.amazon.json b/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.amazon.json similarity index 100% rename from eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_squadshifts.amazon.json rename to eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.amazon.json diff --git a/eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_squadshifts.default.json b/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.default.json similarity index 100% rename from eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_squadshifts.default.json rename to eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.default.json diff --git a/eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_squadshifts.new_wiki.json b/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.new_wiki.json similarity index 100% rename from eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_squadshifts.new_wiki.json rename to eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.new_wiki.json diff --git 
a/eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_squadshifts.nyt.json b/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.nyt.json similarity index 100% rename from eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_squadshifts.nyt.json rename to eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.nyt.json diff --git a/eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_squadshifts.reddit.json b/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.reddit.json similarity index 100% rename from eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_squadshifts.reddit.json rename to eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_squadshifts.reddit.json diff --git a/eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.books.json b/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.books.json similarity index 100% rename from eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.books.json rename to eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.books.json diff --git a/eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.default.json b/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.default.json similarity index 100% rename from eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.default.json rename to eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.default.json diff --git a/eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.electronics.json b/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.electronics.json similarity index 100% rename from eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.electronics.json rename to eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.electronics.json diff --git a/eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.grocery.json b/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.grocery.json similarity index 100% rename from eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.grocery.json rename to eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.grocery.json diff --git a/eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.movies.json b/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.movies.json similarity index 100% rename from eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.movies.json rename to eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.movies.json diff --git a/eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.restaurants.json b/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.restaurants.json similarity index 100% rename from eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.restaurants.json rename to eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.restaurants.json diff --git a/eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.tripadvisor.json b/eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.tripadvisor.json similarity index 100% rename from 
eval_ood/metric.first.sentence.paragraph_answer.question.asahi417_qg_subjqa.tripadvisor.json rename to eval_ood/metric.first.sentence.paragraph_answer.question.lmqg_qg_subjqa.tripadvisor.json diff --git a/eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_squadshifts.amazon.txt b/eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_squadshifts.amazon.txt similarity index 100% rename from eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_squadshifts.amazon.txt rename to eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_squadshifts.amazon.txt diff --git a/eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_squadshifts.default.txt b/eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_squadshifts.default.txt similarity index 100% rename from eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_squadshifts.default.txt rename to eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_squadshifts.default.txt diff --git a/eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_squadshifts.new_wiki.txt b/eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_squadshifts.new_wiki.txt similarity index 100% rename from eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_squadshifts.new_wiki.txt rename to eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_squadshifts.new_wiki.txt diff --git a/eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_squadshifts.nyt.txt b/eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_squadshifts.nyt.txt similarity index 100% rename from eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_squadshifts.nyt.txt rename to eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_squadshifts.nyt.txt diff --git a/eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_squadshifts.reddit.txt b/eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_squadshifts.reddit.txt similarity index 100% rename from eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_squadshifts.reddit.txt rename to eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_squadshifts.reddit.txt diff --git a/eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.books.txt b/eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.books.txt similarity index 100% rename from eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.books.txt rename to eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.books.txt diff --git a/eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.default.txt b/eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.default.txt similarity index 100% rename from eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.default.txt rename to eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.default.txt diff --git a/eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.electronics.txt b/eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.electronics.txt similarity index 100% rename from eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.electronics.txt rename to eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.electronics.txt diff --git a/eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.grocery.txt b/eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.grocery.txt similarity index 100% rename from 
eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.grocery.txt rename to eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.grocery.txt diff --git a/eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.movies.txt b/eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.movies.txt similarity index 100% rename from eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.movies.txt rename to eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.movies.txt diff --git a/eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.restaurants.txt b/eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.restaurants.txt similarity index 100% rename from eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.restaurants.txt rename to eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.restaurants.txt diff --git a/eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.tripadvisor.txt b/eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.tripadvisor.txt similarity index 100% rename from eval_ood/samples.test.hyp.paragraph_answer.question.asahi417_qg_subjqa.tripadvisor.txt rename to eval_ood/samples.test.hyp.paragraph_answer.question.lmqg_qg_subjqa.tripadvisor.txt diff --git a/eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_squadshifts.amazon.txt b/eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_squadshifts.amazon.txt similarity index 100% rename from eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_squadshifts.amazon.txt rename to eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_squadshifts.amazon.txt diff --git a/eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_squadshifts.default.txt b/eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_squadshifts.default.txt similarity index 100% rename from eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_squadshifts.default.txt rename to eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_squadshifts.default.txt diff --git a/eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_squadshifts.new_wiki.txt b/eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_squadshifts.new_wiki.txt similarity index 100% rename from eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_squadshifts.new_wiki.txt rename to eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_squadshifts.new_wiki.txt diff --git a/eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_squadshifts.nyt.txt b/eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_squadshifts.nyt.txt similarity index 100% rename from eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_squadshifts.nyt.txt rename to eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_squadshifts.nyt.txt diff --git a/eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_squadshifts.reddit.txt b/eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_squadshifts.reddit.txt similarity index 100% rename from eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_squadshifts.reddit.txt rename to eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_squadshifts.reddit.txt diff --git 
a/eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.books.txt b/eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.books.txt similarity index 100% rename from eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.books.txt rename to eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.books.txt diff --git a/eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.default.txt b/eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.default.txt similarity index 100% rename from eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.default.txt rename to eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.default.txt diff --git a/eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.electronics.txt b/eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.electronics.txt similarity index 100% rename from eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.electronics.txt rename to eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.electronics.txt diff --git a/eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.grocery.txt b/eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.grocery.txt similarity index 100% rename from eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.grocery.txt rename to eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.grocery.txt diff --git a/eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.movies.txt b/eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.movies.txt similarity index 100% rename from eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.movies.txt rename to eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.movies.txt diff --git a/eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.restaurants.txt b/eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.restaurants.txt similarity index 100% rename from eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.restaurants.txt rename to eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.restaurants.txt diff --git a/eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.tripadvisor.txt b/eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.tripadvisor.txt similarity index 100% rename from eval_ood/samples.validation.hyp.paragraph_answer.question.asahi417_qg_subjqa.tripadvisor.txt rename to eval_ood/samples.validation.hyp.paragraph_answer.question.lmqg_qg_subjqa.tripadvisor.txt diff --git a/trainer_config.json b/trainer_config.json index 34036fd30c2046f2fb1d22240794ed0a0733174b..cf29bcd9a0704392803a99633e295330d4d7284a 100644 --- a/trainer_config.json +++ b/trainer_config.json @@ -1 +1 @@ -{"dataset_path": "asahi417/qg_squad", "dataset_name": "default", "input_types": ["paragraph_answer"], "output_types": ["question"], "prefix_types": null, "model": "facebook/bart-large", "max_length": 512, "max_length_output": 32, "epoch": 4, "batch": 32, "lr": 5e-05, "fp16": false, "random_seed": 1, "gradient_accumulation_steps": 4, "label_smoothing": 0.15} \ No newline at end of file +{"dataset_path": "lmqg/qg_squad", "dataset_name": "default", "input_types": 
["paragraph_answer"], "output_types": ["question"], "prefix_types": null, "model": "facebook/bart-large", "max_length": 512, "max_length_output": 32, "epoch": 4, "batch": 32, "lr": 5e-05, "fp16": false, "random_seed": 1, "gradient_accumulation_steps": 4, "label_smoothing": 0.15} \ No newline at end of file