# --------------------------------------------
import json
from typing import List

import codefast as cf
import pandas as pd

from codefast.patterns.pipeline import Pipeline, BeeMaxin
# --------------------------------------------


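# The two classes below are steps of a codefast Pipeline (wired up under
# __main__): DataLoader lists the raw annotation files and ToCsv merges
# them into train/dev/test CSVs.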
class DataLoader(BeeMaxin):
    def __init__(self) -> None:
        super().__init__()

    def process(self) -> List[str]:
        """Collect all annotation file paths under jsons/."""
        return list(cf.io.walk('jsons/'))


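# Each file under jsons/ is assumed to be JSON Lines: one object per line
# with a 'text' string and a 'labels' list of strings, e.g. the hypothetical
# record {"text": "Acme opened an office in Berlin", "labels": ["ORG", "LOC"]}.
# ToCsv flattens 'labels' into a single space-separated column.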
class ToCsv(BeeMaxin):
    def to_csv(self, json_file: str) -> pd.DataFrame:
        """Read one JSON-lines file into a DataFrame with 'text',
        space-joined 'labels' and 'task_name' columns."""
        texts, labels = [], []
        with open(json_file, 'r') as f:
            for line in f:
                record = json.loads(line)
                texts.append(record['text'])
                labels.append(' '.join(record['labels']))
        task_name = cf.io.basename(json_file).replace('.json', '')
        return pd.DataFrame({'text': texts, 'labels': labels,
                             'task_name': task_name})

    def process(self, files: List[str]):
        """Merge all NER data into train.csv, plus 10-row dev/test samples."""
        frames = []
        for f in files:
            cf.info({'message': f'processing {f}'})
            frames.append(self.to_csv(f))
        df = pd.concat(frames, axis=0)
        df.to_csv('train.csv', index=False)
        # dev.csv and test.csv are small random samples drawn from the same
        # rows that went into train.csv
        df.sample(10).to_csv('dev.csv', index=False)
        df.sample(10).to_csv('test.csv', index=False)


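# Pipeline.gather() is expected to run each step's process() in order,
# handing DataLoader's file list on to ToCsv.process (based on how the two
# steps are wired here; see codefast.patterns.pipeline for the exact contract).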
if __name__ == '__main__':
    pl = Pipeline(
        [
            ('dloader', DataLoader()),
            ('csv converter', ToCsv())
        ]
    )
    pl.gather()
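
# A minimal usage sketch without the Pipeline wrapper, assuming BeeMaxin
# needs no constructor arguments (as DataLoader.__init__ suggests):
#
#   files = DataLoader().process()
#   ToCsv().process(files)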