xaviergillard committed
Commit 690818b
1 Parent(s): acddb18

actually includes sentences (vonnis) in addition to pardons

data/doc_by_doc/test.csv CHANGED
The diff for this file is too large to render. See raw diff
 
data/doc_by_doc/train.csv CHANGED
The diff for this file is too large to render. See raw diff
 
data/doc_by_doc/valid.csv CHANGED
The diff for this file is too large to render. See raw diff
 
data/full_corpus.csv CHANGED
The diff for this file is too large to render. See raw diff
 
data/line_by_line/test.csv CHANGED
The diff for this file is too large to render. See raw diff
 
data/line_by_line/train.csv CHANGED
The diff for this file is too large to render. See raw diff
 
data/line_by_line/valid.csv CHANGED
The diff for this file is too large to render. See raw diff
 
data/page_by_page/test.csv CHANGED
The diff for this file is too large to render. See raw diff
 
data/page_by_page/train.csv CHANGED
The diff for this file is too large to render. See raw diff
 
data/page_by_page/valid.csv CHANGED
The diff for this file is too large to render. See raw diff
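Each of the configurations listed above (line_by_line, page_by_page, doc_by_doc) ships the same three splits: train.csv, valid.csv and test.csv. Below is a minimal sketch of how one configuration could be loaded with the Hugging Face datasets CSV builder; the relative paths come from the file list above, and the loading call itself is an assumption rather than an official loading script for this repository.

from datasets import load_dataset

# Hypothetical example: load the line-by-line configuration straight from its CSV splits.
ds = load_dataset(
    "csv",
    data_files={
        "train": "data/line_by_line/train.csv",
        "validation": "data/line_by_line/valid.csv",
        "test": "data/line_by_line/test.csv",
    },
)
print(ds["train"][0])  # expected to expose at least a "text" column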
 
todataset.py CHANGED
@@ -164,16 +164,22 @@ def prepare(df: pd.DataFrame, config_name: str):
     valid.to_csv("data/{config}/valid.csv".format(config=config_name))
 
 if __name__ == '__main__':
-    #pth= r'C:\Users\xavier.gillard\Documents\ARKEY\data'
-    #prj= "pardons"
-    #df = pd.concat([file_to_pandas(prj, path.join(pth, f)) for f in os.listdir(path=pth)], axis=0, ignore_index=True)
-    #df.to_csv("data/full_corpus.csv", index=False)
-
-    df = pd.read_csv("data/full_corpus.csv")
-    lines = df[["text"]]
-    docs = documents_text(df)[["text"]]
-    pages = pages_text(df)[["text"]]
+    pth= r'C:\Users\xavier.gillard\Documents\ARKEY\data'
+    prj= "pardons"
+    df1 = pd.concat([file_to_pandas(prj, path.join(pth, f)) for f in os.listdir(path=pth)], axis=0, ignore_index=True)
+
+    pth = r'C:\Users\xavier.gillard\Documents\REPO\vonnis'
+    prj = "sentences"
+    df2 = pd.concat([file_to_pandas(prj, path.join(pth, f)) for f in os.listdir(path=pth)], axis=0, ignore_index=True)
 
+    df = pd.concat([df1, df2], axis=0, ignore_index=True)
+    df.to_csv("data/full_corpus.csv", index=False)
+
+    #df = pd.read_csv("data/full_corpus.csv")
+    lines = df[["project","file_id","page_id","line_id","text"]]
+    pages = pages_text(df)[["project","file_id","page_id","text"]]
+    docs = documents_text(df)[["project","file_id","text"]]
+
     prepare(lines, "line_by_line")
     prepare(pages, "page_by_page")
     prepare(docs, "doc_by_doc")
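With this change the full corpus is rebuilt from two sources, the pardons project and the vonnis (court sentences) project, and each row keeps a project column identifying its origin. The following sketch is not part of the commit; it only shows, assuming data/full_corpus.csv is available locally, how the two sub-corpora could be told apart, using the column and project names taken from the diff above.

import pandas as pd

# Hypothetical inspection of the merged corpus produced by todataset.py.
df = pd.read_csv("data/full_corpus.csv")
print(df["project"].value_counts())  # expected values: "pardons" and "sentences"

# The newly added vonnis material carries the project label "sentences".
sentences_only = df[df["project"] == "sentences"]
print(sentences_only[["file_id", "page_id", "line_id", "text"]].head())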
vonnis.py ADDED
@@ -0,0 +1,21 @@
+import os
+import os.path as path
+
+
+data = [
+    ('661', r"C:\Users\xavier.gillard\Documents\ARKEY\Corpora\Vonnis\661_a\541_0009_000_00661_000_0_0344\txt"),
+    ('691', r"C:\Users\xavier.gillard\Documents\ARKEY\Corpora\Vonnis\691\541_0009_000_00691_000_0_0374\txt"),
+    ('875', r"C:\Users\xavier.gillard\Documents\ARKEY\Corpora\Vonnis\875\541_0009_000_00875_000_0_0236\txt")
+]
+
+
+for (k, pth) in data:
+    with open("vonnis/{}.txt".format(k), "w") as out:
+        files = [ path.join(pth, e) for e in os.listdir(pth) ]
+        files.sort()
+
+        for f in files:
+            with open(f) as input:
+                print("###", file=out)
+                text = input.read()
+                print(text, file=out)
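vonnis.py concatenates the .txt files of each vonnis record (presumably one file per page) into a single vonnis/<id>.txt file, writing a "###" marker line before each file's text. The sketch below is not part of the commit; it only illustrates, under that assumption, how such a concatenated file could be split back into its pages. The file name mirrors one of the ids used above.

# Hypothetical: recover the individual pages from one of the concatenated files.
with open("vonnis/661.txt") as f:
    raw = f.read()

# vonnis.py prints "###" on its own line before each page's text,
# so splitting on the marker and dropping empty chunks yields the pages.
pages = [p.strip() for p in raw.split("###") if p.strip()]
print(len(pages), "pages recovered")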