---
license: apache-2.0
dataset_info:
  features:
  - name: Token
    dtype: string
  - name: Lemmas
    dtype: string
  - name: PoS
    dtype: string
  - name: Syntax analysis
    sequence: string
  - name: NER
    sequence: string
  splits:
  - name: train
    num_bytes: 6799126.690184049
    num_examples: 68068
  - name: valid
    num_bytes: 377773.65490797546
    num_examples: 3782
  - name: test
    num_bytes: 377773.65490797546
    num_examples: 3782
  download_size: 3039375
  dataset_size: 7554674
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: valid
    path: data/valid-*
  - split: test
    path: data/test-*
language:
- ru
task_categories:
- token-classification
tags:
- nlp
pretty_name: 'Twilight Tokenized: Russian NLP Dataset'
size_categories:
- 10K<n<100K
---

# Dataset Details

#### This dataset was made for fun and for ✨educational✨ purposes.

It contains the tokenized Russian text of Stephenie Meyer's novel "Breaking Dawn", together with PoS tags, NER labels (LOC, PER, ORG), token lemmas, and syntactic annotation. NER, PoS tagging, lemmatization, and syntactic annotation were produced with spaCy and its 'ru_core_news_lg' model.
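
Below is a minimal sketch of how such annotations can be produced with spaCy's `ru_core_news_lg` pipeline. The column comments mirror the dataset features listed above; the example sentence and the exact export script are illustrative assumptions, not the author's original code.

```python
# Sketch: annotating Russian text with spaCy's ru_core_news_lg
# (requires: pip install spacy && python -m spacy download ru_core_news_lg)
import spacy

nlp = spacy.load("ru_core_news_lg")

text = "Белла посмотрела на Эдварда."  # illustrative sentence, not from the dataset

doc = nlp(text)
for token in doc:
    print(
        token.text,               # Token
        token.lemma_,             # Lemmas
        token.pos_,               # PoS
        token.dep_,               # Syntax analysis (dependency relation)
        token.ent_type_ or "O",   # NER label (LOC / PER / ORG), "O" if none
    )
```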