Zaid committed on
Commit
a31e2c3
1 Parent(s): 5b8af1a

Upload dummy.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. dummy.py +41 -0
dummy.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pandas as pd
3
+ import datasets
4
+ from glob import glob
5
+ import zipfile
6
+
7
class dummy(datasets.GeneratorBasedBuilder):
    """Dataset builder for an Arabic satire tweet-classification dataset.

    Each example is a tweet (string) with a binary label, 'satire' or 'non'.
    The data is a single Excel sheet downloaded from GitHub.
    """

    def _info(self):
        """Return the dataset metadata: a 'tweet' string feature and a
        two-class 'label' feature with names ['satire', 'non']."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    'tweet': datasets.Value('string'),
                    'label': datasets.features.ClassLabel(names=['satire', 'non']),
                }
            )
        )

    def extract_all(self, dir):
        """Recursively find every .zip file under `dir` and extract each
        archive into the directory that contains it.

        NOTE(review): paths are split on '/', so this assumes POSIX-style
        separators — confirm if Windows support is needed.
        """
        zip_files = glob(dir + '/**/**.zip', recursive=True)
        for file in zip_files:
            # `with` guarantees the archive handle is closed after extraction.
            with zipfile.ZipFile(file) as item:
                item.extractall('/'.join(file.split('/')[:-1]))

    def get_all_files(self, dir):
        """Return a list of all files under `dir` (recursive) whose extension
        is one of the recognized data/media extensions."""
        files = []
        valid_file_ext = ['txt', 'csv', 'tsv', 'xlsx', 'xls', 'xml', 'json',
                          'jsonl', 'html', 'wav', 'mp3', 'jpg', 'png']
        for ext in valid_file_ext:
            files += glob(f"{dir}/**/**.{ext}", recursive=True)
        return files

    def _split_generators(self, dl_manager):
        """Download the source Excel file and expose it as a single TRAIN split.

        The downloaded paths are passed to `_generate_examples` under the
        'inputs' key of `filepaths`.
        """
        url = ['https://raw.githubusercontent.com/Noza1234/Arbic-satire-dataset/main/sat_nosat_ml_dl.xlsx']
        downloaded_files = dl_manager.download(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'filepaths': {'inputs': downloaded_files}},
            )
        ]

    def _generate_examples(self, filepaths):
        """Yield (id, example) pairs from each downloaded Excel file.

        Files whose sheet does not have exactly two columns are skipped;
        otherwise the columns are renamed to ['tweet', 'label'] and each row
        becomes one example. Labels are stringified so ClassLabel can encode
        them by name.
        """
        _id = 0
        for filepath in filepaths['inputs']:
            # FIX: the original passed an open('rb') handle to read_excel and
            # never closed it, leaking a file descriptor per input file.
            # Passing the path lets pandas open and close the file itself.
            df = pd.read_excel(filepath, skiprows=0, header=0)
            if len(df.columns) != 2:
                # Unexpected sheet layout — skip rather than mislabel columns.
                continue
            df.columns = ['tweet', 'label']
            for _, record in df.iterrows():
                yield str(_id), {'tweet': record['tweet'], 'label': str(record['label'])}
                _id += 1