1T Conte committed on
Commit
dc13010
1 Parent(s): 691adf1

feat: first commit

Browse files
Files changed (6) hide show
  1. .gitattributes +3 -0
  2. .gitignore +2 -0
  3. Makefile +4 -0
  4. convert.py +142 -0
  5. test.csv +3 -0
  6. train.csv +3 -0
.gitattributes CHANGED
@@ -53,3 +53,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ test.csv filter=lfs diff=lfs merge=lfs -text
57
+ train.csv filter=lfs diff=lfs merge=lfs -text
58
+ ag_news.csv filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ ag_news.csv
2
+ newsSpace
Makefile ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Download the raw AG news dump and decompress it in place;
+ # convert.py reads the resulting "newsSpace" file.
+ newsSpace:
2
+ wget http://groups.di.unipi.it/~gulli/newsSpace.bz2
3
+ bzip2 -d newsSpace.bz2
4
+
convert.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This script converts the data from the raw data to CSV files.
3
+
4
+ Usage:
5
+ make newsSpace
6
+ python convert.py
7
+ """
8
+
9
+ import csv
10
+ import html
11
+ import sys
12
+
13
+ import pandas as pd
14
+
15
+ from bs4 import BeautifulSoup
16
+
17
+ from sklearn.model_selection import train_test_split
18
+
19
+ # Column order of the raw tab-separated newsSpace dump.
+ # NOTE(review): HEADER is not referenced anywhere in this file — it looks
+ # like documentation of the dump layout; confirm before removing.
+ HEADER = [
20
+ "source",
21
+ "url",
22
+ "title",
23
+ "image",
24
+ "category",
25
+ "description",
26
+ "rank",
27
+ "pubdate",
28
+ ]
29
+
30
+ # Output paths: the full converted dataset and its train/test splits.
+ OUTPUT_FILE = "ag_news.csv"
31
+ TRAIN_OUTPUT_FILE = "train.csv"
32
+ TEST_OUTPUT_FILE = "test.csv"
33
+
34
+
35
+ def _clean_text(text):
36
+ text = text.replace("\\\n", "\n")
37
+ text = html.unescape(text)
38
+
39
+ if text == "\\N":
40
+ return ""
41
+
42
+ return text
43
+
44
+
45
def _clean_html(text):
    """Convert an HTML description into plain text.

    Paragraph and line-break tags are turned into newlines before the markup
    is stripped, so the extracted text keeps its paragraph structure; each
    resulting line is stripped of surrounding whitespace.
    """
    html_code = _clean_text(text)
    # BUG FIX: str.replace returns a new string — the original code discarded
    # the result, so these substitutions were silent no-ops and </p>/<br>
    # never produced newlines in the output.
    html_code = html_code.replace("</p>", "\n\n</p>")
    html_code = html_code.replace("<br>", "\n")

    soup = BeautifulSoup(html_code, "lxml")
    text = soup.get_text(separator=" ")

    text = text.replace(" \n", "\n").replace("\n ", "\n")

    # remove extra spaces at the beginning of each line
    lines = [line.strip() for line in text.split("\n")]

    return "\n".join(lines)
59
+
60
+
61
+ def _clean_image(image):
62
+ if image == "none":
63
+ return None
64
+ return image
65
+
66
+
67
+ def _clean_rank(rank):
68
+ return int(rank)
69
+
70
+
71
def run():
    """Parse the raw ``newsSpace`` dump, label every row, and write the CSVs.

    Reads the whole dump (ISO-8859-15 encoded), splits it into records,
    cleans each field, assigns an integer label per category (alphabetical
    order), then writes the full CSV and its train/test splits.
    """
    with open("newsSpace", encoding="ISO-8859-15") as f:
        doc = f.read()

    rows = []
    categories = set()

    # Records are terminated by a tab, a literal \N and a newline.
    for record in doc.split("\t\\N\n"):
        if not record:
            continue

        # Drop escaped tabs so the field split below stays aligned.
        record = record.replace("\\\t", "")

        fields = record.split("\t")
        try:
            source, url, title, image, category, description, rank, pubdate = fields
        except ValueError:
            # Unexpected field count: dump the offending record and abort.
            print(repr(record))
            sys.exit(1)

        categories.add(category)
        rows.append(
            {
                "source": source,
                "url": url,
                "title": _clean_text(title),
                "image": _clean_image(image),
                "category": category,
                "description": _clean_text(description),
                "rank": _clean_rank(rank),
                "pubdate": pubdate,
                "text": _clean_html(description),
            }
        )

    # Assign each category a stable integer label (alphabetical order).
    ordered_categories = sorted(categories)
    for row in rows:
        row["label"] = ordered_categories.index(row["category"])

    save_csv(rows)
    split_csv_train_test(test_size=0.2, random_state=42)
117
+
118
+
119
def save_csv(rows, fname=OUTPUT_FILE):
    """Save the processed data into a CSV file.

    Args:
        rows: non-empty list of dicts sharing the same keys; the first row's
            keys define the header and column order.
        fname: destination path (defaults to OUTPUT_FILE).
    """
    # BUG FIX: the csv module requires newline="" so it can control line
    # endings itself; without it the output gets spurious blank lines on
    # platforms that translate "\n" (e.g. Windows).
    with open(fname, "w", encoding="utf8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=rows[0].keys())
        writer.writeheader()
        writer.writerows(rows)
129
+
130
+
131
def split_csv_train_test(**kwargs):
    """Split the full CSV into train and test CSVs.

    Keyword arguments are forwarded verbatim to sklearn's train_test_split
    (e.g. test_size, random_state).
    """
    full_df = pd.read_csv(OUTPUT_FILE)
    train_df, test_df = train_test_split(full_df, **kwargs)
    train_df.to_csv(TRAIN_OUTPUT_FILE, index=False)
    test_df.to_csv(TEST_OUTPUT_FILE, index=False)
139
+
140
+
141
if __name__ == "__main__":
    # Script entry point — see the module docstring for usage.
    run()
test.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da30f3d131924df14ef5280f93ef45343a81a0592460a64e0847dca225f5ce70
3
+ size 169955948
train.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b16743cfa3c595c010695b2ca80899fb6e52f1e91a736110d60b005e7e7b493
3
+ size 679730930