"""
A quick script for computing word counts of all Danish words in the Danish Gigaword Corpus (DAGW).
"""
|
import json
import os
from collections import defaultdict
from pathlib import Path
from typing import List, Optional

import spacy
|
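# Output directory for the word-frequency JSON files and the root directory of the DAGW sections.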
word_freq_path = "/data/DAGW/word_freqs"
dagw_sektioner = "/data/DAGW/dagw-master/sektioner"
|
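# Load the Danish spaCy pipeline; the parser and NER are excluded since only tokens and POS tags are needed.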
nlp = spacy.load("da_core_news_lg", exclude=["parser", "ner"])
|
Path(word_freq_path).mkdir(parents=True, exist_ok=True)
|
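# Collect the document filepaths for every DAGW section, skipping LICENSE files and .jsonl metadata.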
sections = os.listdir(dagw_sektioner)
filepaths = {}
for section in sections:
    subpath = os.path.join(dagw_sektioner, section)
    filepaths[section] = [
        os.path.join(subpath, name)
        for name in os.listdir(subpath)
        if name != "LICENSE" and not name.endswith(".jsonl")
    ]
|
|
def wordpiece_group_text(text, size=500):
    """Yield `text` in chunks of `size` wordpieces, decoded back to strings with a Danish ELECTRA tokenizer.

    Alternative chunker to `group_text`; not used in the main loop below.
    """
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(
        "Maltehb/-l-ctra-danish-electra-small-uncased", strip_accents=False
    )
    out = tokenizer.encode(text, add_special_tokens=False)

    prv = 0
    for i in range(size, len(out), size):
        yield tokenizer.decode(out[prv:i])
        prv = i
    if prv < len(out):
        yield tokenizer.decode(out[prv : len(out)])
|
|
def group_text(text, size=2400):
    """Yield `text` in consecutive chunks of at most `size` characters."""
    length = len(text)
    prv = 0
    for i in range(size, length, size):
        yield text[prv:i]
        prv = i
    if prv < length:
        yield text[prv:length]
|
|
def text_gen(filepaths):
    """Read each file in `filepaths` and yield its text in character chunks, printing progress every 10000 files."""
    for i, file in enumerate(filepaths):
        if i % 10000 == 0:
            print("\t", i, "/", len(filepaths))
        with open(file, "r") as f:
            text = f.read()
        for t in group_text(text):
            yield t
|
|
class WordCounter:
    """Nested counter mapping token -> POS tag -> count."""

    def __init__(self, l: Optional[List] = None):
        self.dict = defaultdict(lambda: defaultdict(int))
        if l is not None:
            self.add(l)

    def add(self, l: list):
        for token, pos in l:
            self.dict[token][pos] += 1

    def __add__(self, other):
        # Merges `other` into this counter in place and returns self.
        for k_tok in other.dict:
            if k_tok in self.dict:
                for pos, count in other.dict[k_tok].items():
                    self.dict[k_tok][pos] += count
            else:
                self.dict[k_tok] = other.dict[k_tok]
        return self
|
|
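# Stream each section's texts through the spaCy pipeline and count (token, POS tag) pairs.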
for sec in filepaths:
    print("Starting Section:", sec)
    docs = nlp.pipe(texts=text_gen(filepaths[sec]), n_process=10, batch_size=8)

    n = 0
    word_counts = WordCounter()
    for i, doc in enumerate(docs, start=1):
        word_counts += WordCounter([(t.text, t.tag_) for t in doc])
|
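        # Flush the counts for this section to disk every 10000 chunks to keep memory usage bounded.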
        if i % 10000 == 0:
            with open(
                os.path.join(word_freq_path, f"wordfreq_{sec}_{n}.json"), "w"
            ) as f:
                json_str = json.dumps(word_counts.dict)
                f.write(json_str)
            word_counts = WordCounter()
            n += 1
|
    # Write out whatever counts remain for this section.
    with open(os.path.join(word_freq_path, f"wordfreq_{sec}_{n}.json"), "w") as f:
        json_str = json.dumps(word_counts.dict)
        f.write(json_str)
|
|