import spacy
import os
import json
import jsonlines


class ConceptNetExtractor(object):
    """Extracts English ``relation,head,tail,weight`` CSV lines from raw
    ConceptNet assertion dumps (tab-separated files)."""

    def __init__(self, data_path):
        """
        :param data_path: Path to the raw ConceptNet assertions file.
        """
        self.data_path = data_path

    @staticmethod
    def _del_pos(s):
        """
        Deletes part-of-speech encoding from an entity string, if present.
        :param s: Entity string.
        :return: Entity string with part-of-speech encoding removed.
        """
        # ConceptNet appends "/n", "/a", "/v" or "/r" (noun/adjective/verb/adverb).
        if s.endswith(("/n", "/a", "/v", "/r")):
            s = s[:-2]
        return s

    def _parse(self, line):
        """
        Parses a single tab-separated ConceptNet assertion line.

        :param line: Raw assertion line: URI, relation, head, tail, JSON metadata.
        :return: ``"relation,head,tail,weight\\n"`` CSV line, or ``None`` if the
                 edge is not English-to-English, has non-alphabetic entities,
                 or the line is malformed.
        """
        ls = line.split('\t')
        # Guard against truncated/malformed lines (original code would raise
        # IndexError here); skip them like any other unusable assertion.
        if len(ls) < 5:
            return None

        # Keep only edges whose both endpoints are English concepts.
        if not (ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/')):
            return None

        relation = ls[1].split("/")[-1].lower()
        head = self._del_pos(ls[2]).split("/")[-1].lower()
        tail = self._del_pos(ls[3]).split("/")[-1].lower()

        # Drop entities containing digits or other non-letter characters;
        # underscores and hyphens are allowed as word separators.
        if not head.replace("_", "").replace("-", "").isalpha():
            return None

        if not tail.replace("_", "").replace("-", "").isalpha():
            return None

        # A leading "*" marks an inverted relation: strip it and swap the ends.
        if relation.startswith("*"):
            relation = relation[1:]
            head, tail = tail, head

        # The fifth field is a JSON metadata blob carrying the edge weight.
        data = json.loads(ls[4])

        return relation + "," + head + "," + tail + "," + str(data["weight"]) + "\n"


class WikiSentenceGenerator(object):
    """Streams article texts from a WikiExtractor-style output tree laid out as
    ``data_path/<segment_dir>/<segment_file>``, where each segment file is in
    JSON-lines format with a ``"text"`` field per article."""

    def __init__(self, data_path):
        """
        :param data_path: Root directory containing the wiki segment subdirectories.
        """
        self.data_path = data_path

    def _wiki_segment_paths(self):
        """Yields the full path of every segment file under ``data_path``.

        Assumes exactly two directory levels: every entry directly under
        ``data_path`` is a directory of segment files.
        """
        for segment_dir in os.listdir(self.data_path):
            segment_dir_path = os.path.join(self.data_path, segment_dir)
            for segment_path in os.listdir(segment_dir_path):
                yield os.path.join(segment_dir_path, segment_path)

    def _load_text(self):
        """Yields the ``"text"`` field of each valid JSON object in every
        segment file; invalid JSON lines are skipped."""
        for path in self._wiki_segment_paths():
            with jsonlines.open(path) as reader:
                for obj in reader.iter(type=dict, skip_invalid=True):
                    yield obj["text"]


# Testing: smoke-check segment discovery by printing at most the first
# 1000 segment paths.
if __name__ == '__main__':
    wiki_sentence_gen = WikiSentenceGenerator(data_path="../Data/Wiki/WikiSegments")

    for index, path in enumerate(wiki_sentence_gen._wiki_segment_paths(), start=1):
        print(path)
        # Original used `index is 1000` -- identity comparison on an int
        # literal is undefined behavior across interpreters; use equality.
        if index == 1000:
            break