---
annotations_creators:
  - no-annotation
language_creators:
  - found
language:
  - bg
  - cs
  - da
  - de
  - el
  - en
  - es
  - et
  - fi
  - fr
  - ga
  - hr
  - hu
  - it
  - lt
  - lv
  - mt
  - nl
  - pl
  - pt
  - ro
  - sk
  - sl
  - sv
license:
  - cc0-1.0
multilinguality:
  - translation
pretty_name: ParaCrawl
size_categories:
  - 10M<n<100M
source_datasets:
  - original
task_categories:
  - translation
task_ids: []
paperswithcode_id: paracrawl
dataset_info:
  - config_name: enbg
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - bg
    splits:
      - name: train
        num_bytes: 356532771
        num_examples: 1039885
    download_size: 103743335
    dataset_size: 356532771
  - config_name: encs
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - cs
    splits:
      - name: train
        num_bytes: 638068353
        num_examples: 2981949
    download_size: 196410022
    dataset_size: 638068353
  - config_name: enda
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - da
    splits:
      - name: train
        num_bytes: 598624306
        num_examples: 2414895
    download_size: 182804827
    dataset_size: 598624306
  - config_name: ende
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - de
    splits:
      - name: train
        num_bytes: 3997191986
        num_examples: 16264448
    download_size: 1307754745
    dataset_size: 3997191986
  - config_name: enel
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - el
    splits:
      - name: train
        num_bytes: 688069020
        num_examples: 1985233
    download_size: 193553374
    dataset_size: 688069020
  - config_name: enes
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - es
    splits:
      - name: train
        num_bytes: 6209466040
        num_examples: 21987267
    download_size: 1953839527
    dataset_size: 6209466040
  - config_name: enet
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - et
    splits:
      - name: train
        num_bytes: 201408919
        num_examples: 853422
    download_size: 70158650
    dataset_size: 201408919
  - config_name: enfi
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - fi
    splits:
      - name: train
        num_bytes: 524624150
        num_examples: 2156069
    download_size: 159209242
    dataset_size: 524624150
  - config_name: enfr
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - fr
    splits:
      - name: train
        num_bytes: 9015440258
        num_examples: 31374161
    download_size: 2827554088
    dataset_size: 9015440258
  - config_name: enga
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - ga
    splits:
      - name: train
        num_bytes: 104523278
        num_examples: 357399
    download_size: 29394367
    dataset_size: 104523278
  - config_name: enhr
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - hr
    splits:
      - name: train
        num_bytes: 247646552
        num_examples: 1002053
    download_size: 84904103
    dataset_size: 247646552
  - config_name: enhu
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - hu
    splits:
      - name: train
        num_bytes: 403168065
        num_examples: 1901342
    download_size: 119784765
    dataset_size: 403168065
  - config_name: enit
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - it
    splits:
      - name: train
        num_bytes: 3340542050
        num_examples: 12162239
    download_size: 1066720197
    dataset_size: 3340542050
  - config_name: enlt
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - lt
    splits:
      - name: train
        num_bytes: 197053694
        num_examples: 844643
    download_size: 66358392
    dataset_size: 197053694
  - config_name: enlv
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - lv
    splits:
      - name: train
        num_bytes: 142409870
        num_examples: 553060
    download_size: 47368967
    dataset_size: 142409870
  - config_name: enmt
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - mt
    splits:
      - name: train
        num_bytes: 52786023
        num_examples: 195502
    download_size: 19028352
    dataset_size: 52786023
  - config_name: ennl
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - nl
    splits:
      - name: train
        num_bytes: 1384042007
        num_examples: 5659268
    download_size: 420090979
    dataset_size: 1384042007
  - config_name: enpl
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - pl
    splits:
      - name: train
        num_bytes: 854786500
        num_examples: 3503276
    download_size: 270427885
    dataset_size: 854786500
  - config_name: enpt
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - pt
    splits:
      - name: train
        num_bytes: 2031891156
        num_examples: 8141940
    download_size: 638184462
    dataset_size: 2031891156
  - config_name: enro
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - ro
    splits:
      - name: train
        num_bytes: 518359240
        num_examples: 1952043
    download_size: 160684751
    dataset_size: 518359240
  - config_name: ensk
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - sk
    splits:
      - name: train
        num_bytes: 337704729
        num_examples: 1591831
    download_size: 101307152
    dataset_size: 337704729
  - config_name: ensl
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - sl
    splits:
      - name: train
        num_bytes: 182399034
        num_examples: 660161
    download_size: 65037465
    dataset_size: 182399034
  - config_name: ensv
    features:
      - name: translation
        dtype:
          translation:
            languages:
              - en
              - sv
    splits:
      - name: train
        num_bytes: 875576366
        num_examples: 3476729
    download_size: 275528370
    dataset_size: 875576366
---

# Dataset Card for "para_crawl"

## Table of Contents

- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Additional Information](#additional-information)

## Dataset Description

### Dataset Summary

Web-Scale Parallel Corpora for Official European Languages.
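
Each configuration pairs English with one other official EU language (for example `enbg` for English–Bulgarian). As a minimal sketch, not an official snippet from this card, a pair can be loaded with the `datasets` library; the config names come from the `dataset_info` metadata above:

```python
from datasets import load_dataset

# "enbg" is one language-pair configuration; any other pair listed in the
# metadata above (e.g. "ende", "enfr", "ensv") works the same way.
enbg = load_dataset("para_crawl", "enbg", split="train")
print(len(enbg))  # 1039885 sentence pairs, per the metadata above
```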

### Supported Tasks and Leaderboards

More Information Needed

### Languages

English (`en`) paired with 23 other official EU languages: `bg`, `cs`, `da`, `de`, `el`, `es`, `et`, `fi`, `fr`, `ga`, `hr`, `hu`, `it`, `lt`, `lv`, `mt`, `nl`, `pl`, `pt`, `ro`, `sk`, `sl`, `sv` (one configuration per pair, e.g. `enbg`, `ende`).

## Dataset Structure

### Data Instances

#### enbg

- **Size of downloaded dataset files:** 103.75 MB
- **Size of the generated dataset:** 356.54 MB
- **Total amount of disk used:** 460.27 MB

An example of 'train' looks as follows. This example was too long and was cropped:

```
{
    "translation": "{\"bg\": \". “A felirat faragott karnis a bejárat fölött, templom épült 14 Július 1643, A földesúr és felesége Jeremiás Murguleţ, C..."
}
```

#### encs

- **Size of downloaded dataset files:** 196.41 MB
- **Size of the generated dataset:** 638.07 MB
- **Total amount of disk used:** 834.48 MB

An example of 'train' looks as follows. This example was too long and was cropped:

```
{
    "translation": "{\"cs\": \". “A felirat faragott karnis a bejárat fölött, templom épült 14 Július 1643, A földesúr és felesége Jeremiás Murguleţ, C..."
}
```

#### enda

- **Size of downloaded dataset files:** 182.81 MB
- **Size of the generated dataset:** 598.62 MB
- **Total amount of disk used:** 781.43 MB

An example of 'train' looks as follows. This example was too long and was cropped:

```
{
    "translation": "{\"da\": \". “A felirat faragott karnis a bejárat fölött, templom épült 14 Július 1643, A földesúr és felesége Jeremiás Murguleţ, C..."
}
```

#### ende

- **Size of downloaded dataset files:** 1.31 GB
- **Size of the generated dataset:** 4.00 GB
- **Total amount of disk used:** 5.30 GB

An example of 'train' looks as follows. This example was too long and was cropped:

```
{
    "translation": "{\"de\": \". “A felirat faragott karnis a bejárat fölött, templom épült 14 Július 1643, A földesúr és felesége Jeremiás Murguleţ, C..."
}
```

#### enel

- **Size of downloaded dataset files:** 193.56 MB
- **Size of the generated dataset:** 688.07 MB
- **Total amount of disk used:** 881.62 MB

An example of 'train' looks as follows. This example was too long and was cropped:

```
{
    "translation": "{\"el\": \". “A felirat faragott karnis a bejárat fölött, templom épült 14 Július 1643, A földesúr és felesége Jeremiás Murguleţ, C..."
}
```
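
Some pairs are much larger than the ones shown above (per the metadata, the `ende` download alone is about 1.31 GB and `enfr` about 2.83 GB). As a hedged sketch, the `datasets` streaming mode can iterate examples without materializing the full dataset on disk; whether streaming works for a given loading script can depend on the `datasets` version:

```python
from datasets import load_dataset

# Stream English–German pairs instead of downloading and generating ~4 GB locally.
ende_stream = load_dataset("para_crawl", "ende", split="train", streaming=True)
for i, example in enumerate(ende_stream):
    print(example["translation"]["de"])
    if i == 2:  # stop after a few examples
        break
```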

### Data Fields

The data fields are the same among all splits.

#### enbg

- `translation`: a multilingual `string` variable, with possible languages including `en`, `bg`.

#### encs

- `translation`: a multilingual `string` variable, with possible languages including `en`, `cs`.

#### enda

- `translation`: a multilingual `string` variable, with possible languages including `en`, `da`.

#### ende

- `translation`: a multilingual `string` variable, with possible languages including `en`, `de`.

#### enel

- `translation`: a multilingual `string` variable, with possible languages including `en`, `el`.
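
As a hedged sketch of how these fields surface through the `datasets` library (the `translation` field is a dict keyed by language code; `enbg` is just one example configuration):

```python
from datasets import load_dataset

enbg = load_dataset("para_crawl", "enbg", split="train")

# The schema is a single Translation feature keyed by language code.
print(enbg.features)  # {'translation': Translation(languages=['en', 'bg'], ...)}

example = enbg[0]
print(example["translation"]["en"])  # English side of the sentence pair
print(example["translation"]["bg"])  # Bulgarian side of the sentence pair
```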

### Data Splits

| name | train    |
| ---- | -------: |
| enbg |  1039885 |
| encs |  2981949 |
| enda |  2414895 |
| ende | 16264448 |
| enel |  1985233 |
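
Split sizes for the remaining language pairs are listed in the `dataset_info` metadata at the top of this card. As a hedged sketch (not a utility shipped with the dataset), the configurations can also be enumerated programmatically:

```python
from datasets import get_dataset_config_names, load_dataset

# Print the number of training sentence pairs for every language pair.
# Note: this downloads every configuration, which amounts to many gigabytes.
for config in get_dataset_config_names("para_crawl"):
    train = load_dataset("para_crawl", config, split="train")
    print(config, len(train))
```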

## Dataset Creation

### Curation Rationale

More Information Needed

### Source Data

#### Initial Data Collection and Normalization

More Information Needed

#### Who are the source language producers?

More Information Needed

### Annotations

#### Annotation process

More Information Needed

#### Who are the annotators?

More Information Needed

### Personal and Sensitive Information

More Information Needed

## Considerations for Using the Data

### Social Impact of Dataset

More Information Needed

### Discussion of Biases

More Information Needed

### Other Known Limitations

More Information Needed

## Additional Information

### Dataset Curators

More Information Needed

### Licensing Information

Creative Commons CC0 license ("no rights reserved").

### Citation Information

```bibtex
@inproceedings{banon-etal-2020-paracrawl,
    title = "{P}ara{C}rawl: Web-Scale Acquisition of Parallel Corpora",
    author = "Ba{\~n}{\'o}n, Marta  and
      Chen, Pinzhen  and
      Haddow, Barry  and
      Heafield, Kenneth  and
      Hoang, Hieu  and
      Espl{\`a}-Gomis, Miquel  and
      Forcada, Mikel L.  and
      Kamran, Amir  and
      Kirefu, Faheem  and
      Koehn, Philipp  and
      Ortiz Rojas, Sergio  and
      Pla Sempere, Leopoldo  and
      Ram{\'\i}rez-S{\'a}nchez, Gema  and
      Sarr{\'\i}as, Elsa  and
      Strelec, Marek  and
      Thompson, Brian  and
      Waites, William  and
      Wiggins, Dion  and
      Zaragoza, Jaume",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.acl-main.417",
    doi = "10.18653/v1/2020.acl-main.417",
    pages = "4555--4567",
    abstract = "We report on methods to create the largest publicly available parallel corpora by crawling the web, using open source software. We empirically compare alternative methods and publish benchmark data sets for sentence alignment and sentence pair filtering. We also describe the parallel corpora released and evaluate their quality and their usefulness to create machine translation systems.",
}
```

### Contributions

Thanks to @thomwolf, @lewtun, @patrickvonplaten, @mariamabarham for adding this dataset.