---
annotations_creators:
  - no-annotation
language_creators:
  - found
language:
  - af
  - am
  - ar
  - as
  - az
  - be
  - bg
  - bn
  - br
  - bs
  - ca
  - cs
  - cy
  - da
  - de
  - el
  - en
  - eo
  - es
  - et
  - eu
  - fa
  - ff
  - fi
  - fr
  - fy
  - ga
  - gd
  - gl
  - gn
  - gu
  - ha
  - he
  - hi
  - hr
  - ht
  - hu
  - hy
  - id
  - ig
  - is
  - it
  - ja
  - jv
  - ka
  - kk
  - km
  - kn
  - ko
  - ku
  - ky
  - la
  - lg
  - li
  - ln
  - lo
  - lt
  - lv
  - mg
  - mk
  - ml
  - mn
  - mr
  - ms
  - my
  - ne
  - nl
  - 'no'
  - ns
  - om
  - or
  - pa
  - pl
  - ps
  - pt
  - qu
  - rm
  - ro
  - ru
  - sa
  - sc
  - sd
  - si
  - sk
  - sl
  - so
  - sq
  - sr
  - ss
  - su
  - sv
  - sw
  - ta
  - te
  - th
  - tl
  - tn
  - tr
  - ug
  - uk
  - ur
  - uz
  - vi
  - wo
  - xh
  - yi
  - yo
  - zh
  - zu
language_bcp47:
  - bn-Latn
  - hi-Latn
  - my-x-zawgyi
  - ta-Latn
  - te-Latn
  - ur-Latn
  - zh-Hans
  - zh-Hant
license:
  - unknown
multilinguality:
  - multilingual
size_categories:
  - 1K<n<10K
source_datasets:
  - original
task_categories:
  - text-generation
  - fill-mask
task_ids:
  - language-modeling
  - masked-language-modeling
paperswithcode_id: cc100
pretty_name: CC100
configs:
  - config_name: am
    data_files:
      - split: train
        path: data/am.txt
  - config_name: ar
    data_files:
      - split: train
        path: data/ar.txt
  - config_name: as
    data_files:
      - split: train
        path: data/as.txt
  - config_name: az
    data_files:
      - split: train
        path: data/az.txt
  - config_name: be
    data_files:
      - split: train
        path: data/be.txt
  - config_name: bg
    data_files:
      - split: train
        path: data/bg.txt
  - config_name: bn
    data_files:
      - split: train
        path: data/bn.txt
  - config_name: bn_rom
    data_files:
      - split: train
        path: data/bn_rom.txt
  - config_name: br
    data_files:
      - split: train
        path: data/br.txt
  - config_name: bs
    data_files:
      - split: train
        path: data/bs.txt
  - config_name: ca
    data_files:
      - split: train
        path: data/ca.txt
  - config_name: cs
    data_files:
      - split: train
        path: data/cs.txt
  - config_name: cy
    data_files:
      - split: train
        path: data/cy.txt
  - config_name: da
    data_files:
      - split: train
        path: data/da.txt
  - config_name: de
    data_files:
      - split: train
        path: data/de.txt
  - config_name: el
    data_files:
      - split: train
        path: data/el.txt
  - config_name: en
    data_files:
      - split: train
        path: data/en.txt
  - config_name: eo
    data_files:
      - split: train
        path: data/eo.txt
  - config_name: es
    data_files:
      - split: train
        path: data/es.txt
  - config_name: et
    data_files:
      - split: train
        path: data/et.txt
  - config_name: eu
    data_files:
      - split: train
        path: data/eu.txt
  - config_name: fa
    data_files:
      - split: train
        path: data/fa.txt
  - config_name: ff
    data_files:
      - split: train
        path: data/ff.txt
  - config_name: fi
    data_files:
      - split: train
        path: data/fi.txt
  - config_name: fr
    data_files:
      - split: train
        path: data/fr.txt
  - config_name: fy
    data_files:
      - split: train
        path: data/fy.txt
  - config_name: ga
    data_files:
      - split: train
        path: data/ga.txt
  - config_name: gd
    data_files:
      - split: train
        path: data/gd.txt
  - config_name: gl
    data_files:
      - split: train
        path: data/gl.txt
  - config_name: gn
    data_files:
      - split: train
        path: data/gn.txt
  - config_name: gu
    data_files:
      - split: train
        path: data/gu.txt
  - config_name: ha
    data_files:
      - split: train
        path: data/ha.txt
  - config_name: he
    data_files:
      - split: train
        path: data/he.txt
  - config_name: hi
    data_files:
      - split: train
        path: data/hi.txt
  - config_name: hi_rom
    data_files:
      - split: train
        path: data/hi_rom.txt
  - config_name: hr
    data_files:
      - split: train
        path: data/hr.txt
  - config_name: ht
    data_files:
      - split: train
        path: data/ht.txt
  - config_name: hu
    data_files:
      - split: train
        path: data/hu.txt
  - config_name: hy
    data_files:
      - split: train
        path: data/hy.txt
  - config_name: id
    data_files:
      - split: train
        path: data/id.txt
  - config_name: ig
    data_files:
      - split: train
        path: data/ig.txt
  - config_name: is
    data_files:
      - split: train
        path: data/is.txt
  - config_name: it
    data_files:
      - split: train
        path: data/it.txt
  - config_name: ja
    data_files:
      - split: train
        path: data/ja.txt
  - config_name: jv
    data_files:
      - split: train
        path: data/jv.txt
  - config_name: ka
    data_files:
      - split: train
        path: data/ka.txt
  - config_name: kk
    data_files:
      - split: train
        path: data/kk.txt
  - config_name: km
    data_files:
      - split: train
        path: data/km.txt
  - config_name: kn
    data_files:
      - split: train
        path: data/kn.txt
  - config_name: ko
    data_files:
      - split: train
        path: data/ko.txt
  - config_name: ku
    data_files:
      - split: train
        path: data/ku.txt
  - config_name: ky
    data_files:
      - split: train
        path: data/ky.txt
  - config_name: la
    data_files:
      - split: train
        path: data/la.txt
  - config_name: lg
    data_files:
      - split: train
        path: data/lg.txt
  - config_name: li
    data_files:
      - split: train
        path: data/li.txt
  - config_name: ln
    data_files:
      - split: train
        path: data/ln.txt
  - config_name: lo
    data_files:
      - split: train
        path: data/lo.txt
  - config_name: lt
    data_files:
      - split: train
        path: data/lt.txt
  - config_name: lv
    data_files:
      - split: train
        path: data/lv.txt
  - config_name: mg
    data_files:
      - split: train
        path: data/mg.txt
  - config_name: mk
    data_files:
      - split: train
        path: data/mk.txt
  - config_name: ml
    data_files:
      - split: train
        path: data/ml.txt
  - config_name: mn
    data_files:
      - split: train
        path: data/mn.txt
  - config_name: mr
    data_files:
      - split: train
        path: data/mr.txt
  - config_name: ms
    data_files:
      - split: train
        path: data/ms.txt
  - config_name: my
    data_files:
      - split: train
        path: data/my.txt
  - config_name: my_zaw
    data_files:
      - split: train
        path: data/my_zaw.txt
  - config_name: ne
    data_files:
      - split: train
        path: data/ne.txt
  - config_name: nl
    data_files:
      - split: train
        path: data/nl.txt
  - config_name: 'no'
    data_files:
      - split: train
        path: data/no.txt
  - config_name: ns
    data_files:
      - split: train
        path: data/ns.txt
  - config_name: om
    data_files:
      - split: train
        path: data/om.txt
  - config_name: or
    data_files:
      - split: train
        path: data/or.txt
  - config_name: pa
    data_files:
      - split: train
        path: data/pa.txt
  - config_name: pl
    data_files:
      - split: train
        path: data/pl.txt
  - config_name: ps
    data_files:
      - split: train
        path: data/ps.txt
  - config_name: pt
    data_files:
      - split: train
        path: data/pt.txt
  - config_name: qu
    data_files:
      - split: train
        path: data/qu.txt
  - config_name: rm
    data_files:
      - split: train
        path: data/rm.txt
  - config_name: ro
    data_files:
      - split: train
        path: data/ro.txt
  - config_name: ru
    data_files:
      - split: train
        path: data/ru.txt
  - config_name: sa
    data_files:
      - split: train
        path: data/sa.txt
  - config_name: si
    data_files:
      - split: train
        path: data/si.txt
  - config_name: sc
    data_files:
      - split: train
        path: data/sc.txt
  - config_name: sd
    data_files:
      - split: train
        path: data/sd.txt
  - config_name: sk
    data_files:
      - split: train
        path: data/sk.txt
  - config_name: sl
    data_files:
      - split: train
        path: data/sl.txt
  - config_name: so
    data_files:
      - split: train
        path: data/so.txt
  - config_name: sq
    data_files:
      - split: train
        path: data/sq.txt
  - config_name: sr
    data_files:
      - split: train
        path: data/sr.txt
  - config_name: ss
    data_files:
      - split: train
        path: data/ss.txt
  - config_name: su
    data_files:
      - split: train
        path: data/su.txt
  - config_name: sv
    data_files:
      - split: train
        path: data/sv.txt
  - config_name: sw
    data_files:
      - split: train
        path: data/sw.txt
  - config_name: ta
    data_files:
      - split: train
        path: data/ta.txt
  - config_name: ta_rom
    data_files:
      - split: train
        path: data/ta_rom.txt
  - config_name: te
    data_files:
      - split: train
        path: data/te.txt
  - config_name: te_rom
    data_files:
      - split: train
        path: data/te_rom.txt
  - config_name: th
    data_files:
      - split: train
        path: data/th.txt
  - config_name: tl
    data_files:
      - split: train
        path: data/tl.txt
  - config_name: tn
    data_files:
      - split: train
        path: data/tn.txt
  - config_name: tr
    data_files:
      - split: train
        path: data/tr.txt
  - config_name: ug
    data_files:
      - split: train
        path: data/ug.txt
  - config_name: uk
    data_files:
      - split: train
        path: data/uk.txt
  - config_name: ur
    data_files:
      - split: train
        path: data/ur.txt
  - config_name: ur_rom
    data_files:
      - split: train
        path: data/ur_rom.txt
  - config_name: uz
    data_files:
      - split: train
        path: data/uz.txt
  - config_name: vi
    data_files:
      - split: train
        path: data/vi.txt
  - config_name: wo
    data_files:
      - split: train
        path: data/wo.txt
  - config_name: xh
    data_files:
      - split: train
        path: data/xh.txt
  - config_name: yi
    data_files:
      - split: train
        path: data/yi.txt
  - config_name: yo
    data_files:
      - split: train
        path: data/yo.txt
  - config_name: zh-Hans
    data_files:
      - split: train
        path: data/zh-Hans.txt
  - config_name: zh-Hant
    data_files:
      - split: train
        path: data/zh-Hant.txt
  - config_name: zu
    data_files:
      - split: train
        path: data/zu.txt
---

cc100-samples is a subset that contains the first 10,000 lines of cc100.

## Languages

To load a language that isn't listed as a configuration, specify its language code as the config. You can find the valid languages in the Homepage section of the Dataset Description: https://data.statmt.org/cc-100/. E.g. `dataset = load_dataset("cc100-samples", lang="en")`; a fuller sketch follows the list of valid codes below.

```python
VALID_CODES = [
    "am", "ar", "as", "az", "be", "bg", "bn", "bn_rom", "br", "bs", "ca", "cs", "cy", "da", "de",
    "el", "en", "eo", "es", "et", "eu", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gn", "gu",
    "ha", "he", "hi", "hi_rom", "hr", "ht", "hu", "hy", "id", "ig", "is", "it", "ja", "jv", "ka",
    "kk", "km", "kn", "ko", "ku", "ky", "la", "lg", "li", "ln", "lo", "lt", "lv", "mg", "mk", "ml",
    "mn", "mr", "ms", "my", "my_zaw", "ne", "nl", "no", "ns", "om", "or", "pa", "pl", "ps", "pt",
    "qu", "rm", "ro", "ru", "sa", "si", "sc", "sd", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv",
    "sw", "ta", "ta_rom", "te", "te_rom", "th", "tl", "tn", "tr", "ug", "uk", "ur", "ur_rom", "uz",
    "vi", "wo", "xh", "yi", "yo", "zh-Hans", "zh-Hant", "zu",
]
```
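
A minimal loading sketch with the Hugging Face `datasets` library, assuming the configurations defined in the header above can be selected by name and that the dataset is reachable under the repository id `cc100-samples` (a namespace prefix such as `<user>/cc100-samples` may be required):

```python
from datasets import load_dataset

# Any code from VALID_CODES above can be used as the configuration name.
# The bare repository id is an assumption; adjust it to the actual Hub path,
# e.g. "<user>/cc100-samples".
dataset = load_dataset("cc100-samples", "am", split="train")

print(dataset[0]["text"])  # first paragraph of the Amharic sample
```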

## Dataset Structure

### Data Instances

An example from the `am` configuration:

```
{'id': '0', 'text': 'ተለዋዋጭ የግድግዳ አንግል ሙቅ አንቀሳቅሷል ቲ-አሞሌ አጥቅሼ ...\n'}
```

Each data point is a paragraph of text. The paragraphs are presented in the original (unshuffled) order. Documents are separated by a data point consisting of a single newline character.
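
The sketch below shows one way to reconstruct document boundaries from a stream of such data points, grouping consecutive non-empty paragraphs and splitting on the empty separator entries (the helper `group_into_documents` is illustrative, not part of the dataset):

```python
def group_into_documents(paragraphs):
    """Group paragraphs into documents, using blank entries as separators."""
    document = []
    for paragraph in paragraphs:
        text = paragraph.strip()
        if text:
            document.append(text)
        elif document:  # separator entry closes the current document
            yield document
            document = []
    if document:  # flush the trailing document
        yield document

# With a loaded split, pass the text column, e.g.
# docs = list(group_into_documents(example["text"] for example in dataset))
print(list(group_into_documents(["p1", "p2", "\n", "p3"])))
# [['p1', 'p2'], ['p3']]
```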

### Data Fields

The data fields are:

- `id`: id of the example
- `text`: content as a string