saied committed on
Commit
19bdedc
1 Parent(s): 2bcf1bf

adding sample data and sample script

Browse files
Files changed (2) hide show
  1. clean.zip +3 -0
  2. tasnim_daily.py +68 -0
clean.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55c23af1eb25af74456b5ea2fe05422431bb5172dd7680efd57e0a9b81a228a1
3
+ size 16004
tasnim_daily.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datasets
2
+ import csv
3
+ import os
4
+ import sys
5
# Raise the csv field-size limit so very long article bodies are accepted.
# sys.maxsize overflows the C long that the csv module expects on platforms
# with a 32-bit long (notably 64-bit Windows), so fall back to the largest
# portable value there.
try:
    csv.field_size_limit(sys.maxsize)
except OverflowError:
    csv.field_size_limit(2**31 - 1)

_DESCRIPTION = """
persian_news_dataset is a collection of 5 million news articles.
News articles have been gathered from more than 10 news agencies for the last 12 years.
The dataset is provided by Rohan AI lab for research purposes.
for more information refer to this link:
"""

# Project homepage (the "this link:" the description above refers to).
_PROJECT_URL = "https://saied71.github.io/RohanAiLab/"

# BibTeX citation for the dataset.  The original string was malformed
# (it lacked the opening "@misc{<key>," line, leaving a dangling "}").
_CITATION = """
@misc{persian_news_dataset,
author={Saied Alimoradi},
year={2021},
howpublished={https://saied71.github.io/RohanAiLab/}
}
"""

# Archive passed to the download manager and extracted before reading the CSV.
# NOTE(review): this commit adds "clean.zip", not "persian_news_dataset.zip" —
# confirm the archive name matches what is actually published with the script.
_URL = "persian_news_dataset.zip"
27
class Persian_news(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Persian news CSV.

    Downloads and extracts ``_URL``, then streams examples from the
    contained ``persian_news_dataset.csv``.  Every example has three
    string fields: ``text``, ``title`` and ``category``.
    """

    def _info(self):
        """Describe the dataset's features, homepage and citation."""
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "title": datasets.Value("string"),
                "category": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_PROJECT_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and expose a single TRAIN split."""
        extracted = dl_manager.download_and_extract(_URL)
        csv_path = os.path.join(extracted, "persian_news_dataset.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": csv_path},
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, example)`` pairs from the CSV at *filepath*.

        The first row is treated as a header and skipped, so ids start
        at 1.  Column order is assumed to be (text, category, title) —
        TODO confirm against the actual data file.
        """
        with open(filepath, encoding="utf-8") as csv_file:
            rows = csv.reader(csv_file)
            next(rows, None)  # drop the header row
            for row_id, record in enumerate(rows, start=1):
                yield row_id, {
                    "text": record[0],
                    "title": record[2],
                    "category": record[1],
                }