/*
 * Copyright (c) 2013-2018 GraphAware
 *
 * This file is part of the GraphAware Framework.
 *
 * GraphAware Framework is free software: you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by the Free Software Foundation, either
 * version 3 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details. You should have received a copy of
 * the GNU General Public License along with this program.  If not, see
 * <http://www.gnu.org/licenses/>.
 */
package com.graphaware.nlp.ml.textrank;

import static com.graphaware.nlp.util.TypeConverter.*;
import com.graphaware.nlp.ml.pagerank.CoOccurrenceItem;
import com.graphaware.nlp.ml.pagerank.PageRank;
import com.graphaware.common.util.Pair;
import com.graphaware.nlp.NLPManager;
import com.graphaware.nlp.configuration.DynamicConfiguration;
import com.graphaware.nlp.domain.Keyword;
import com.graphaware.nlp.domain.TfIdfObject;
import com.graphaware.nlp.persistence.constants.Labels;
import com.graphaware.nlp.persistence.constants.Relationships;
import com.graphaware.nlp.dsl.request.PipelineSpecification;
import org.neo4j.graphdb.*;
import org.neo4j.logging.Log;
import com.graphaware.common.log.LoggerFactory;

import java.util.*;
import java.util.stream.Collectors;
import java.util.concurrent.atomic.AtomicReference;

public class TextRank {

    private static final Log LOG = LoggerFactory.getLogger(TextRank.class);

    private static final String COOCCURRENCE_QUERY
            = "MATCH (a:AnnotatedText)-[:CONTAINS_SENTENCE]->(s:Sentence)-[:SENTENCE_TAG_OCCURRENCE]->(to:TagOccurrence)\n"
            + "WHERE id(a) = {id}\n"
            + "WITH to\n"
            + "ORDER BY to.startPosition\n"
            + "MATCH (to)-[:TAG_OCCURRENCE_TAG]->(t:Tag)\n"
            + "WHERE size(t.value) > 2 AND NOT(toLower(t.value) IN {stopwords}) AND NOT ANY(pos IN to.pos WHERE pos IN {forbiddenPOSs}) AND NOT ANY(l IN labels(t) WHERE l IN {forbiddenNEs})\n"
            + "WITH collect(t) as tags, collect(to) as tagsPosition\n"
            + "UNWIND range(0, size(tags) - 2, 1) as i\n"
            + "RETURN id(tags[i]) as tag1, id(tags[i+1]) as tag2, tags[i].id as tag1_id, tags[i+1].id as tag2_id, "
            + "tagsPosition[i].startPosition as sourceStartPosition, tagsPosition[i].endPosition as sourceEndPosition, "
            + "tagsPosition[i+1].startPosition as destinationStartPosition, tagsPosition[i+1].endPosition as destinationEndPosition, tagsPosition[i].pos as pos1, tagsPosition[i+1].pos as pos2";

    private static final String COOCCURRENCE_QUERY_BY_SENTENCE
            = "MATCH (a:AnnotatedText)-[:CONTAINS_SENTENCE]->(s:Sentence)-[:SENTENCE_TAG_OCCURRENCE]->(to:TagOccurrence)\n"
            + "WHERE id(a) = {id} \n"
            + "WITH s, to\n"
            + "ORDER BY s.sentenceNumber, to.startPosition\n"
            + "MATCH (to)-[:TAG_OCCURRENCE_TAG]->(t:Tag)\n"
            + "WHERE size(t.value) > 2 AND NOT(toLower(t.value) IN {stopwords}) AND NOT ANY(pos IN to.pos WHERE pos IN {forbiddenPOSs}) AND NOT ANY(l IN labels(t) WHERE l IN {forbiddenNEs})\n"
            + "WITH s, collect(t) as tags, collect(to) as tagsPosition\n"
            + "ORDER BY s.sentenceNumber\n"
            + "UNWIND range(0, size(tags) - 2, 1) as i\n"
            + "RETURN s, id(tags[i]) as tag1, id(tags[i+1]) as tag2, tags[i].id as tag1_id, tags[i+1].id as tag2_id, "
            + "tagsPosition[i].startPosition as sourceStartPosition, tagsPosition[i].endPosition as sourceEndPosition, "
            + "tagsPosition[i+1].startPosition as destinationStartPosition, tagsPosition[i+1].endPosition as destinationEndPosition, tagsPosition[i].pos as pos1, tagsPosition[i+1].pos as pos2";

    private static final String COOCCURRENCE_QUERY_FROM_DEPENDENCIES
            = "MATCH (a:AnnotatedText)-[:CONTAINS_SENTENCE]->(s:Sentence)-[:SENTENCE_TAG_OCCURRENCE]->(to:TagOccurrence)\n"
            + "WHERE id(a) = {id}\n"
            + "WITH to\n"
            + "OPTIONAL MATCH (to)-[r]-(to2:TagOccurrence)\n"
            + "WHERE to <> to2 AND to.startPosition < to2.startPosition\n"
            + "WITH to, to2, r\n"
            + "MATCH (to)-[:TAG_OCCURRENCE_TAG]->(t:Tag)\n"
            + "WHERE size(t.value) > 2 AND NOT(toLower(t.value) IN {stopwords}) AND NOT ANY(pos IN to.pos WHERE pos IN {forbiddenPOSs}) AND NOT ANY(l IN labels(t) WHERE l IN {forbiddenNEs})\n"
            + "OPTIONAL MATCH (to2)-[:TAG_OCCURRENCE_TAG]->(t2:Tag)\n"
            + "WHERE size(t2.value) > 2 AND NOT(toLower(t2.value) IN {stopwords}) AND NOT ANY(pos IN to2.pos WHERE pos IN {forbiddenPOSs}) AND NOT ANY(l IN labels(t2) WHERE l IN {forbiddenNEs})\n"
            + "RETURN id(t) as tag1, id(t2) as tag2, t.id as tag1_id, t2.id as tag2_id, "
            + "to.startPosition as sourceStartPosition, to.endPosition as sourceEndPosition, to2.startPosition as destinationStartPosition, to2.endPosition as destinationEndPosition, "
            + "to.pos as pos1, to2.pos as pos2, collect(type(r))\n"
            + "ORDER BY sourceStartPosition, destinationStartPosition";

    private static final String GET_TAG_QUERY = "MATCH (node:Tag)<-[:TAG_OCCURRENCE_TAG]-(to:TagOccurrence)<-[:SENTENCE_TAG_OCCURRENCE]-(:Sentence)<-[:CONTAINS_SENTENCE]-(a:AnnotatedText)\n"
            + "WHERE id(a) = {id} AND NOT (toLower(node.value) IN {stopwords})"
            + "OPTIONAL MATCH (to)<-[:COMPOUND|AMOD]-(to2:TagOccurrence)-[:TAG_OCCURRENCE_TAG]->(t2:Tag)\n"
            + "WHERE NOT exists(to2.pos) OR size(to2.pos) = 0 OR ANY(p IN to2.pos WHERE p IN {posList}) AND NOT (toLower(t2.value) IN {stopwords})\n"
            + "RETURN node.id as tag, to.startPosition as sP, to.endPosition as eP, id(node) as tagId, "
            + "collect(id(t2)) as rel_tags, collect(to2.startPosition) as rel_tos,  collect(to2.endPosition) as rel_toe, labels(node) as labels\n"
            + "ORDER BY sP asc";

    private static final String PIPELINE_WITHOUT_NER = "CORE.TEXTRANK_PIPELINE";
    public static final TfIdfObject TFIDF_1_1 = new TfIdfObject(1., 1.);
    public static final TfIdfObject TFIDF_0_0 = new TfIdfObject(0., 0.);

    private final GraphDatabaseService database;
    private final boolean removeStopWords;
    private final boolean directionsMatter;
    private final boolean respectSentences;
    private final boolean useDependencies;
    private final boolean cooccurrencesFromDependencies;
    private final boolean cleanKeywords;
    private final boolean expandNEs;
    private final double topxTags;
    private final Label keywordLabel;
    private final Set<String> stopWords;
    private final List<String> admittedPOSs;
    private final List<String> forbiddenNEs;
    private final List<String> forbiddenPOSs;
    private Map<Long, List<Long>> neExpanded;
    private final Map<Long, String> idToValue = new HashMap<>();

    public TextRank(GraphDatabaseService database,
                    boolean removeStopWords,
                    boolean directionsMatter,
                    boolean respectSentences,
                    boolean useDependencies,
                    boolean cooccurrencesFromDependencies,
                    boolean cleanKeywords,
                    double topxTags,
                    Label keywordLabel,
                    Set<String> stopWords,
                    List<String> admittedPOSs,
                    List<String> forbiddenNEs,
                    List<String> forbiddenPOSs) {
        this.database = database;
        this.removeStopWords = removeStopWords;
        this.directionsMatter = directionsMatter;
        this.respectSentences = respectSentences;
        this.useDependencies = useDependencies;
        this.cooccurrencesFromDependencies = cooccurrencesFromDependencies;
        this.cleanKeywords = cleanKeywords;
        this.expandNEs = true; // not useful making this user-customizable
        this.topxTags = topxTags;
        this.keywordLabel = keywordLabel;
        this.stopWords = stopWords;
        this.admittedPOSs = admittedPOSs;
        this.forbiddenNEs = forbiddenNEs;
        this.forbiddenPOSs = forbiddenPOSs;

    }

    private String getPipelineWithoutNEs(String language) {
        String name = getPipelineWithoutNerKey(language);
        if (!NLPManager.getInstance().getTextProcessorsManager().hasPipeline(name)) {
            Map<String, Object> params = new HashMap<>();
            params.put("tokenize", true);
            params.put("ner", false);
            String processor = NLPManager.getInstance().getTextProcessorsManager().getDefaultProcessor().getClass().getName();
            PipelineSpecification ps = new PipelineSpecification(getPipelineWithoutNerKey(language), processor);
            ps.setProcessingSteps(params);
            NLPManager.getInstance().getTextProcessorsManager().addPipeline(ps);
        }
        return name;
    }

    private String getPipelineWithoutNerKey(String language) {
        return PIPELINE_WITHOUT_NER + "_" + language;
    }

    public Map<Long, Map<Long, CoOccurrenceItem>> createCooccurrences(List<Node> annotatedTexts, String language, boolean fromDependencies) {
        String query;
        if (fromDependencies)
            query = COOCCURRENCE_QUERY_FROM_DEPENDENCIES;
        else if (respectSentences) {
            query = COOCCURRENCE_QUERY_BY_SENTENCE;
        } else {
            query = COOCCURRENCE_QUERY;
        }

        Map<String, Object> params = new HashMap<>();
        params.put("stopwords", stopWords);
        params.put("forbiddenPOSs", forbiddenPOSs);
        params.put("forbiddenNEs", forbiddenNEs);
        //params.put("forbiddenNEs", new ArrayList<String>());
        //params.put("forbiddenNEs", Arrays.asList("NER_Number", "NER_Ordinal", "NER_Percent", "NER_Duration"));

        if (fromDependencies) {
            params.put("stopwords", new ArrayList<>());
            params.put("forbiddenPOSs", new ArrayList<>());
            params.put("forbiddenNEs", new ArrayList<>());
        }

        LOG.debug("\n Number of annotated texts: " + annotatedTexts.size());
        List<CoOccurrenceItem> prelim = new ArrayList<>();
        for (Node node : annotatedTexts) {
            params.put("id", node.getId());
            processOneAnnotatedText(query, params, prelim);
        }
        if (prelim.isEmpty()) {
            LOG.warn("Nothing to do: no co-occurrence passing cleansing criteria found.");
            return null;
        }

        Map<Long, List<Pair<Long, Long>>> neExp;
        if (expandNEs && !fromDependencies) {
            // process named entities: split them into individual tokens by calling ga.nlp.annotate(), assign them IDs and create co-occurrences
            neExp = expandNamedEntities(language);
            neExpanded = neExp.entrySet().stream()
                    .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().stream().map(p -> p.second()).collect(Collectors.toList())));
        } else
            neExp = new HashMap<>();

        Map<Long, Map<Long, CoOccurrenceItem>> results = computeCooccurrent(fromDependencies, prelim, neExp);
        return results;
    }

    private Map<Long, Map<Long, CoOccurrenceItem>> computeCooccurrent(boolean fromDependencies, List<CoOccurrenceItem> prelim, Map<Long, List<Pair<Long, Long>>> neExp) {
        Map<Long, Map<Long, CoOccurrenceItem>> results = new HashMap<>();
        long neVisited = 0L;
        for (CoOccurrenceItem it : prelim) {
            Long tag1 = it.getSource();
            Long tag2 = it.getDestination();
            int tag1Start = it.getStartPositions().get(0).first().intValue();
            int tag2Start = it.getStartPositions().get(0).second().intValue();

            if (expandNEs && !fromDependencies) {
                if (neExp.containsKey(tag1)) {
                    if (neVisited == 0L || neVisited != tag1.longValue()) {
                        connectTagsInNE(results, neExp.get(tag1), tag1Start);
                        neVisited = 0L;
                    }
                    tag1Start += neExp.get(tag1).get(neExp.get(tag1).size() - 1).first().intValue();
                    tag1 = neExp.get(tag1).get(neExp.get(tag1).size() - 1).second();
                }

                if (neExp.containsKey(tag2)) {
                    connectTagsInNE(results, neExp.get(tag2), tag2Start);
                    neVisited = tag2;
                    tag2 = neExp.get(tag2).get(0).second();
                } else
                    neVisited = 0L;
            }

            addTagToCoOccurrence(results, tag1, tag1Start, tag2, tag2Start);
            if (!directionsMatter) { // when direction of co-occurrence relationships is not important
                addTagToCoOccurrence(results, tag2, tag2Start, tag1, tag1Start);
            }

        }
        return results;
    }

    private void processOneAnnotatedText(String query, Map<String, Object> params, List<CoOccurrenceItem> prelim) {
        int offsetStart = 0;
        if (!prelim.isEmpty()) {
            // find the last word position from previous annotated text and add some number so we can merge previous text with the coming one
            offsetStart = prelim.get(prelim.size() - 1).getEndPositions().get(0).second().intValue() + 2;
            LOG.debug("\n Processing another AnnotatedText. Offset: " + offsetStart);
        }

        Result res = null;
        try (Transaction tx = database.beginTx();) {
            res = database.execute(query, params);
            tx.success();
        } catch (Exception e) {
            LOG.error("Error while creating co-occurrences: ", e);
        }

        while (res != null && res.hasNext()) {
            Map<String, Object> next = res.next();
            Long tag1 = toLong(next.get("tag1"));
            Long tag2 = toLong(next.get("tag2"));
            String tagVal1 = (String) next.get("tag1_id");
            String tagVal2 = (String) next.get("tag2_id");
            Long tag1Start = offsetStart + toLong(next.get("sourceStartPosition"));
            Long tag2Start = offsetStart + toLong(next.get("destinationStartPosition"));
            Long tag1End = offsetStart + toLong(next.get("sourceEndPosition"));
            Long tag2End = offsetStart + toLong(next.get("destinationEndPosition"));
            List<String> pos1 = next.get("pos1") != null ? Arrays.asList((String[]) next.get("pos1")) : new ArrayList<>();
            List<String> pos2 = next.get("pos2") != null ? Arrays.asList((String[]) next.get("pos2")) : new ArrayList<>();

            // check whether POS of both tags are admitted
            boolean bPOS1 = pos1.stream().filter(pos -> admittedPOSs.contains(pos)).count() != 0 || pos1.size() == 0;
            boolean bPOS2 = pos2.stream().filter(pos -> admittedPOSs.contains(pos)).count() != 0 || pos2.size() == 0;

            // fill tag co-occurrences (adjacency matrix)
            if (bPOS1 && bPOS2 && tagVal1 != null && tagVal2 != null) {
                CoOccurrenceItem co = new CoOccurrenceItem(tag1, tag1Start.intValue(), tag2, tag2Start.intValue());
                co.addEndPositions(tag1End.intValue(), tag2End.intValue());
                prelim.add(co);
            }

            // for logging purposes and for `expandNamedEntities()`
            if (tag1 != null)
                idToValue.put(tag1, tagVal1);
            if (tag2 != null)
                idToValue.put(tag2, tagVal2);
        }
    }

    private void addTagToCoOccurrence(Map<Long, Map<Long, CoOccurrenceItem>> results, Long source, int sourceStartPosition, Long destination, int destinationStartPosition) {
        Map<Long, CoOccurrenceItem> mapTag1;
        if (!results.containsKey(source)) {
            mapTag1 = new HashMap<>();
            results.put(source, mapTag1);
        } else {
            mapTag1 = results.get(source);
        }

        if (mapTag1.containsKey(destination)) {
            CoOccurrenceItem ccEntry = mapTag1.get(destination);
            ccEntry.incCount();
            ccEntry.addPositions(sourceStartPosition, destinationStartPosition);
        } else {
            mapTag1.put(destination, new CoOccurrenceItem(source, sourceStartPosition, destination, destinationStartPosition));
        }
    }

    private void connectTagsInNE(Map<Long, Map<Long, CoOccurrenceItem>> results, List<Pair<Long, Long>> tags, int startOffset) {
        int n = tags.size();
        for (int i = 0; i < n - 1; i++) {
            for (int j = i + 1; j < n; j++) {
                addTagToCoOccurrence(results, tags.get(i).second(), startOffset + tags.get(i).first().intValue(), tags.get(j).second(), startOffset + tags.get(j).first().intValue());
                if (!directionsMatter) { // when direction of co-occurrence relationships is not important
                    addTagToCoOccurrence(results, tags.get(j).second(), startOffset + tags.get(j).first().intValue(), tags.get(i).second(), startOffset + tags.get(i).first().intValue());
                }
            }
        }
    }

    private Map<Long, List<Pair<Long, Long>>> expandNamedEntities(String language) {
        Map<String, Object> parameters = new HashMap<>();
        parameters.put("name", getPipelineWithoutNEs(language));

        Map<String, Object> p = new HashMap<>();
        p.put("params", parameters);

        Map<Long, List<Pair<Long, Long>>> result = new HashMap<>();
        Map<Long, String> newIdsToVal = new HashMap<>();

        long nextNewId = -2L;
        for (Long valueL : idToValue.keySet()) {
            if (idToValue.get(valueL).trim().split(" ").length < 2)
                continue;
            String str = idToValue.get(valueL).toLowerCase().split("_")[0].trim();
            p.put("text", str);
            List<Pair<Long, Long>> res = new ArrayList<>();
            try (Transaction tx = database.beginTx()) {
                Result r = database.execute(
                        "WITH ga.nlp.processor.annotate({text}, {params}) AS annotated\n"
                                + "with keys(annotated.sentences[0].tagOccurrences) as keys, annotated\n"
                                + "unwind keys as k\n"
                                + "with toInteger(k) as kInt, annotated\n"
                                + "order by kInt asc\n"
                                + "return kInt as start, annotated.sentences[0].tagOccurrences[toString(kInt)][0].element.id as tagVal"
                        , p);
                while (r.hasNext()) {
                    Map<String, Object> next = r.next();
                    Long start = (Long) next.get("start");
                    String val = (String) next.get("tagVal");
                    List<Long> lId = idToValue.entrySet().stream().filter(en -> en.getValue().equals(val) || en.getValue().equalsIgnoreCase(val)).map(Map.Entry::getKey).collect(Collectors.toList());
                    List<Long> lIdNew = newIdsToVal.entrySet().stream().filter(en -> en.getValue().equals(val) || en.getValue().equalsIgnoreCase(val)).map(Map.Entry::getKey).collect(Collectors.toList());
                    if (lId != null && lId.size() > 0) {
                        res.add(new Pair<>(start, lId.get(0)));
                    } else if (lIdNew != null && lIdNew.size() > 0) {
                        res.add(new Pair<>(start, lIdNew.get(0)));
                    } else {
                        res.add(new Pair<>(start, nextNewId));
                        newIdsToVal.put(nextNewId, val);
                        nextNewId -= 1L;
                    }
                }
                r.close();
                tx.success();
            }
            if (res.size() > 0)
                result.put(valueL, res); // map: id(NE) -> ListOfIndividualTags(Pair(startPostion, tagId))
        }

        // finish by adding newly assigned IDs to idToValue map
        for (Long key : newIdsToVal.keySet())
            idToValue.put(key, newIdsToVal.get(key));

        return result;
    }

    public TextRankResult evaluate(List<Node> annotatedTexts, String language, int iter, double damp, double threshold) {
        Map<Long, Map<Long, CoOccurrenceItem>> coOccurrence = createCooccurrences(annotatedTexts, language, cooccurrencesFromDependencies);
        if (coOccurrence == null) {
            return TextRankResult.SUCCESS(new HashMap<>());
        }
        PageRank pageRank = new PageRank(database);
        Map<Long, Double> pageRanks = pageRank.run(coOccurrence, iter, damp, threshold);

        if (cooccurrencesFromDependencies) {
            coOccurrence.clear();
            coOccurrence = createCooccurrences(annotatedTexts, language, false); // co-occurrences from natural word flow; needed for merging keywords into key phrases
        }

        if (pageRanks == null) {
            LOG.error("Page ranks not retrieved, aborting evaluate() method ...");
            return TextRankResult.FAILED("Page ranks not retrieved");
        }

        int n_oneThird = (int) (pageRanks.size() * topxTags);
        List<Long> topThird = getTopX(pageRanks, n_oneThird);

        LOG.info("Keyword candidates are top " + n_oneThird + " tags from this list:");
        pageRanks.entrySet().stream()
                .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
                .forEach(en -> LOG.debug("   " + idToValue.get(en.getKey()) + ": " + en.getValue()));

        Map<String, Object> params = new HashMap<>();
        params.put("posList", admittedPOSs);
        params.put("stopwords", removeStopWords ? stopWords : new ArrayList<>());

        // Detail tag analysis - get start & end positions and related tags (dependencies)
        List<KeywordExtractedItem> keywordsOccurrences = new ArrayList<>();
        Map<Long, KeywordExtractedItem> keywordMap = new HashMap<>();
        List<Long> wrongNEs = new ArrayList<>();
        for (Node node : annotatedTexts) {
            params.put("id", node.getId());
            detailedTagAnalysis(GET_TAG_QUERY, params, pageRanks, keywordsOccurrences, keywordMap, wrongNEs);
        }

        Map<String, Keyword> results = new HashMap<>();

        while (!keywordsOccurrences.isEmpty()) {
            final AtomicReference<KeywordExtractedItem> keywordOccurrence
                    = new AtomicReference<>(keywordsOccurrences.remove(0));
            final AtomicReference<String> currValue = new AtomicReference<>(keywordOccurrence.get().getValue());
            final AtomicReference<Double> currRelevance = new AtomicReference<>(keywordOccurrence.get().getRelevance());
            final AtomicReference<Integer> currNTopRated = new AtomicReference<>(0);
            Set<Long> relTagIDs = getRelTagsIntoDepth(keywordOccurrence.get(), keywordsOccurrences);
            relTagIDs.retainAll(topThird); // keep only those that are among top 1/3
            if (!useDependencies && !topThird.contains(keywordOccurrence.get().getTagId())) // if useDependencies==false, keep only those keywords that are among top 1/3
                continue;
            if (useDependencies && !topThird.contains(keywordOccurrence.get().getTagId()) && relTagIDs.size() == 0)
                continue;
            Map<String, Keyword> localResults;
            if (topThird.contains(keywordOccurrence.get().getTagId()))
                currNTopRated.set(currNTopRated.get() + 1);
            do {
                int endPosition = keywordOccurrence.get().getEndPosition();
                localResults = checkNextKeyword(keywordOccurrence.get(), coOccurrence, keywordMap);
                if (localResults.size() > 0) {
                    keywordOccurrence.set(null);
                    localResults.entrySet().stream().forEach((item) -> {
                        KeywordExtractedItem nextKeyword = keywordsOccurrences.get(0);
                        if (nextKeyword != null && nextKeyword.getValue().equalsIgnoreCase(item.getKey())
                                && (topThird.contains(nextKeyword.getTagId()) || useDependencies)
                                && (nextKeyword.getStartPosition() - endPosition) == 1) // crucial condition for graphs from co-occurrences, but very useful also for graphs from dependencies
                        {
                            String newCurrValue = currValue.get().trim().split("_")[0] + " " + item.getKey();
                            double newCurrRelevance = currRelevance.get() + item.getValue().getRelevance();
                            if (topThird.contains(nextKeyword.getTagId()))
                                currNTopRated.set(currNTopRated.get() + 1);
                            currValue.set(newCurrValue);
                            currRelevance.set(newCurrRelevance);
                            keywordOccurrence.set(nextKeyword);
                            keywordsOccurrences.remove(0);
                        }
                    });
                }
            } while (!localResults.isEmpty() && keywordOccurrence.get() != null);
            if (currNTopRated.get() > 0)
                addToResults(currValue.get(), currRelevance.get(), TFIDF_1_1, currNTopRated.get(), results, 1);
        }

        if (expandNEs) {
            // add named entities that contain at least some of the top 1/3 of words
            for (Long key : neExpanded.keySet()) {
                if (neExpanded.get(key).stream().filter(v -> topThird.contains(v)).count() == 0)
                    continue;
                if (wrongNEs.contains(key))
                    continue;
                String keystr = idToValue.get(key);//.toLowerCase();
                double pr = pageRanks.containsKey(key) ? pageRanks.get(key) : 0.;
                if (pr == 0.) // set PageRank value of a NE to max value of PR of it's composite words
                    pr = (double) pageRanks.entrySet().stream()
                            .filter(en -> neExpanded.get(key).contains(en.getKey()))
                            .mapToDouble(en -> en.getValue())
                            .max().orElse(0.);
                addToResults(keystr,
                        pr,
                        TFIDF_1_1,
                        (int) (neExpanded.get(key).stream().filter(v -> topThird.contains(v)).count()),
                        results,
                        1);
            }
        }

        computeTotalOccurrence(results);
        if (cleanKeywords) {
            results = cleanFinalKeywords(results, n_oneThird);
        }
        return TextRankResult.SUCCESS(results);
    }

    private void detailedTagAnalysis(String GET_TAG_QUERY, Map<String, Object> params, Map<Long, Double> pageRanks, List<KeywordExtractedItem> keywordsOccurrences, Map<Long, KeywordExtractedItem> keywordMap, List<Long> wrongNEs) {
        // find the last word position from previous annotated text and add some number so we can merge previous text with the coming one
        final int offset_start = keywordsOccurrences.isEmpty() ? 0 : keywordsOccurrences.get(keywordsOccurrences.size() - 1).getEndPosition() + 2;

        try (Transaction tx = database.beginTx()) {
            Result res = database.execute(GET_TAG_QUERY, params);
            while (res != null && res.hasNext()) {
                Map<String, Object> next = res.next();
                long tagId = (long) next.get("tagId");

                // remove stop-NEs
                if (iterableToList((Iterable<String>) next.get("labels")).stream().anyMatch(el -> forbiddenNEs.contains(el))) {
                    wrongNEs.add(tagId);
                    continue;
                }

                KeywordExtractedItem item = new KeywordExtractedItem(tagId);
                item.setValue(((String) next.get("tag")));
                item.setStartPosition(((Number) next.get("sP")).intValue() + offset_start);
                item.setEndPosition(((Number) next.get("eP")).intValue() + offset_start);
                item.setRelatedTags(iterableToList((Iterable<Long>) next.get("rel_tags")));
                item.setRelTagStartingPoints(iterableToList((Iterable<Number>) next.get("rel_tos")).stream().map(el -> Long.valueOf(el.intValue() + offset_start)).collect(Collectors.toList()));
                item.setRelTagEndingPoints(iterableToList((Iterable<Number>) next.get("rel_toe")).stream().map(el -> Long.valueOf(el.intValue() + offset_start)).collect(Collectors.toList()));
                item.setRelevance(pageRanks.containsKey(tagId) ? pageRanks.get(tagId) : 0);
                keywordsOccurrences.add(item);
                if (!keywordMap.containsKey(tagId)) {
                    keywordMap.put(tagId, item);
                } else {
                    keywordMap.get(tagId).update(item);
                }
                //LOG.debug(" Adding for " + item.getValue() + ": " + item.getRelatedTags());
            }
            if (res != null) {
                res.close();
            }
            tx.success();
        } catch (Exception e) {
            LOG.error("Error while running TextRank evaluation: ", e);
        }
    }

    private Map<String, Keyword> checkNextKeyword(KeywordExtractedItem keywordOccurrence, Map<Long, Map<Long, CoOccurrenceItem>> coOccurrences, Map<Long, KeywordExtractedItem> keywords) {
        long tagId = keywordOccurrence.getTagId();
        Map<String, Keyword> results = new HashMap<>();
        if (!coOccurrences.containsKey(tagId))
            return results;

        Map<Integer, Set<Long>> mapStartId = createCoOccurrenceMapping(coOccurrences.get(tagId)); // mapping: sourceStartPosition -> Set(destination tagIDs)
        Set<Long> coOccurrence = mapStartId.get(keywordOccurrence.getStartPosition());
        if (coOccurrence == null) {
            return results;
        }

        Iterator<Long> iterator = coOccurrence.stream()
                .filter((ccEntry) -> ccEntry != tagId)
                .filter((ccEntry) -> keywords.containsKey(ccEntry))
                .iterator();

        while (iterator.hasNext()) {
            Long ccEntry = iterator.next();
            String relValue = keywords.get(ccEntry).getValue();
            List<Long> merged = new ArrayList<>(keywords.get(ccEntry).getRelatedTags());
            merged.retainAll(keywordOccurrence.getRelatedTags()); // new
            // TO DO: even when using dependencies, we should be able to merge words that are next to each other but that have no dependency (?)
            if (!useDependencies || keywordOccurrence.getRelatedTags().contains(keywords.get(ccEntry).getTagId()) || merged.size() > 0) {
                addToResults(relValue,
                        keywords.get(ccEntry).getRelevance(),
                        TFIDF_0_0,
                        0,
                        results,
                        1);
            }
        }

        return results;
    }

    private void addToResults(String res, double relevance, TfIdfObject tfidf, int nTopRated, Map<String, Keyword> results, int occurrences) {
        //LOG.debug("addToResults: " + res + " " + relevance + " " + occurrences);
        if (res != null) {
            String resLower = res.toLowerCase();
            if (results.containsKey(resLower)) {
                results.get(resLower).incCountsBy(occurrences);
                //LOG.debug("+inc");
            } else {
                final Keyword keyword = new Keyword(resLower, occurrences);
                keyword.setOriginalTagId(res);
                keyword.setRelevance(relevance);
                keyword.setTf(tfidf.getTf());
                keyword.setIdf(tfidf.getIdf());
                keyword.setNTopRated(nTopRated);
                results.put(resLower, keyword);
            }
        }
    }

    /**
     * Expands the related-tag set of {@code kwOccurrence} by pulling in the tags
     * of every occurrence that is connected to it (directly by tag id, or through
     * a shared related tag).
     *
     * <p>Note: the set grows while the list is being traversed, so occurrences
     * examined later can be pulled in through tags contributed by occurrences
     * examined earlier — a single-pass, order-dependent transitive expansion.
     */
    private Set<Long> getRelTagsIntoDepth(KeywordExtractedItem kwOccurrence, List<KeywordExtractedItem> kwOccurrences) {
        Set<Long> relTags = new HashSet<>(kwOccurrence.getRelatedTags());
        for (KeywordExtractedItem item : kwOccurrences) {
            boolean connected = relTags.contains(item.getTagId())
                    || item.getRelatedTags().stream().anyMatch(relTags::contains);
            if (connected) {
                relTags.addAll(item.getRelatedTags());
                relTags.add(item.getTagId());
            }
        }
        return relTags;
    }

    /**
     * Runs keyword post-processing on the graph.
     *
     * <p>Supported methods:
     * <ul>
     * <li>{@code "direct"} — if a keyphrase of the current document contains a
     * keyphrase of another document, also create a DESCRIBES relationship from
     * the shorter keyphrase to that other document;</li>
     * <li>{@code "subgroups"} — create HAS_SUBGROUP relationships between
     * keywords, e.g. (station) -[HAS_SUBGROUP]-> (space station).</li>
     * </ul>
     *
     * @param method        post-processing method name ({@code "direct"} or {@code "subgroups"})
     * @param annotatedText annotated-text node to restrict processing to; when
     *                      {@code null} the query runs over the full graph
     * @return {@code true} on success (including the no-keyword short-circuit),
     *         {@code false} when the query failed
     * @throws RuntimeException for an unknown (or {@code null}) method name
     */
    public boolean postProcess(String method, Node annotatedText) {
        // If the annotated text exists but carries no keywords there is nothing to
        // link; a null annotatedText deliberately falls through and runs globally.
        Set<Long> inputKeywordIds = getKeywordIds(annotatedText);
        if (annotatedText != null && inputKeywordIds.isEmpty()) {
            return true;
        }

        // Yoda-style equals() so a null method reaches the explicit error below
        // instead of throwing a bare NullPointerException.
        if ("direct".equals(method)) {
            return runPostProcessingQuery(
                    getQueryDirect(inputKeywordIds, keywordLabel.name()),
                    inputKeywordIds,
                    null,
                    "Running identification of sub-keyphrases ...",
                    "Error while running TextRank post-processing (identification of sub-keyphrases): ");
        }
        if ("subgroups".equals(method)) {
            return runPostProcessingQuery(
                    getQuerySubGroups(inputKeywordIds, keywordLabel.name()),
                    inputKeywordIds,
                    annotatedText,
                    "Discovering HAS_SUBGROUP relationships between keywords and keyphrases ...",
                    "Error while running TextRank post-processing (discovering HAS_SUBGROUP relationships): ");
        }
        throw new RuntimeException("Unknown post-processing method. Available methods: 'direct', 'subgroups'");
    }

    /**
     * Executes one post-processing Cypher statement in its own transaction.
     *
     * @param query               the Cypher statement to run
     * @param ids                 keyword node ids bound to the {@code {ids}} parameter
     * @param annotatedTextForLog when non-null, its id is logged before the query runs
     * @param startMessage        info message logged before execution
     * @param errorMessage        error message logged on failure
     * @return {@code true} when the transaction committed, {@code false} on any exception
     */
    private boolean runPostProcessingQuery(String query, Set<Long> ids, Node annotatedTextForLog, String startMessage, String errorMessage) {
        try (Transaction tx = database.beginTx()) {
            if (annotatedTextForLog != null) {
                LOG.info("input annotated text id : " + annotatedTextForLog.getId());
            }
            LOG.info(startMessage);
            database.execute(query, Collections.singletonMap("ids", ids));
            tx.success();
            return true;
        } catch (Exception e) {
            LOG.error(errorMessage, e);
            return false;
        }
    }

    /**
     * Builds the Cypher statement that connects multi-term keyphrases to the
     * longer keyphrases they are a textual sub-group of via HAS_SUBGROUP.
     * When {@code inputKeywordIds} is non-empty the left-hand keyword is
     * restricted to those node ids (bound as the {@code {ids}} parameter).
     */
    private static String getQuerySubGroups(Set<Long> inputKeywordIds, String label) {
        StringBuilder query = new StringBuilder("match (k:").append(label).append(")\n");
        if (inputKeywordIds.isEmpty()) {
            query.append("WHERE ");
        } else {
            query.append(" WHERE id(k) IN {ids}\nAND ");
        }
        query.append("k.numTerms > 1\n")
                .append("with k WHERE true \n")
                .append("match (k2:").append(label).append(")\n")
                .append("where k2.value STARTS WITH (k.value + ' ') OR k2.value ENDS WITH (' ' + k.value)\n")
                .append("merge (k)-[r:HAS_SUBGROUP]->(k2)");
        return query.toString();
    }

    /**
     * Builds the Cypher statement for the "direct" post-processing method: every
     * multi-term keyphrase inherits the DESCRIBES relationships (with counts) of
     * any longer keyphrase that textually contains it. When
     * {@code inputKeywordIds} is non-empty the shorter keyphrase is restricted to
     * those node ids (bound as the {@code {ids}} parameter).
     */
    private static String getQueryDirect(Set<Long> inputKeywordIds, String label) {
        StringBuilder query = new StringBuilder("match (k:").append(label).append(")\n");
        if (inputKeywordIds.isEmpty()) {
            query.append("WHERE ");
        } else {
            query.append(" WHERE id(k) IN {ids}\nAND ");
        }
        query.append("k.numTerms > 1\n")
                .append("with k, k.value as ks_orig\n")
                .append("match (k2:").append(label).append(")\n")
                .append("where k2.numTerms > k.numTerms and k2.value CONTAINS ks_orig\n")
                .append("match (k2)-[r2:DESCRIBES]->(a:AnnotatedText)\n")
                .append("where not (k)-[:DESCRIBES]->(a)\n")
                .append("MERGE (k)-[rn:DESCRIBES]->(a) SET rn.count = k2.count, rn.count_exactMatch = k2.count_exactMatch");
        return query.toString();
    }

    /**
     * Collects the node ids of all keywords that DESCRIBE the given annotated
     * text (start nodes of incoming DESCRIBES relationships).
     *
     * @param annotatedText the annotated-text node, may be {@code null}
     * @return the keyword node ids; empty when {@code annotatedText} is null
     */
    private Set<Long> getKeywordIds(Node annotatedText) {
        Set<Long> ids = new HashSet<>();
        if (annotatedText == null) {
            return ids;
        }
        for (Relationship describes : annotatedText.getRelationships(Direction.INCOMING, Relationships.DESCRIBES)) {
            ids.add(describes.getStartNodeId());
        }
        return ids;
    }

    /**
     * Returns the ids of the {@code x} highest-ranked entries, best first.
     * Ties keep the map's iteration order (the sort is stable).
     *
     * @param pageRanks id-to-rank mapping
     * @param x         maximum number of ids to return
     */
    private List<Long> getTopX(Map<Long, Double> pageRanks, int x) {
        return pageRanks.entrySet().stream()
                .sorted(Map.Entry.<Long, Double>comparingByValue().reversed())
                .limit(x)
                .map(Map.Entry::getKey)
                .collect(Collectors.toList());
    }

    /**
     * Builds, for each left-hand start position, the sorted set of destination
     * tag ids that co-occur to its right.
     *
     * <p>Improvements over the previous version: the raw {@code new TreeSet()}
     * (unchecked warning) is replaced with a properly typed set, and the
     * containsKey/put dance with {@code computeIfAbsent}.
     *
     * @param coOccorrence co-occurrence items keyed by tag id
     * @return start position → ordered set of co-occurring destination tag ids
     */
    private Map<Integer, Set<Long>> createCoOccurrenceMapping(Map<Long, CoOccurrenceItem> coOccorrence) {
        Map<Integer, Set<Long>> result = new HashMap<>();
        coOccorrence.values().forEach((item) -> {
            item.getStartPositions().forEach((pairStartingPoint) -> {
                // only count pairs in their left-to-right order, to avoid duplicates
                if (pairStartingPoint.first() < pairStartingPoint.second()) {
                    result.computeIfAbsent(pairStartingPoint.first(), k -> new TreeSet<>())
                            .add(item.getDestination());
                }
            });
        });
        return result;
    }

    /**
     * For every keyword, adds to its total count the total counts of all longer
     * keyphrases whose raw text contains it. Quadratic over the result map,
     * mutating the counters in place in the same pairwise order as before.
     */
    private void computeTotalOccurrence(Map<String, Keyword> results) {
        for (Map.Entry<String, Keyword> outer : results.entrySet()) {
            Keyword shorter = outer.getValue();
            for (Map.Entry<String, Keyword> inner : results.entrySet()) {
                Keyword longer = inner.getValue();
                boolean containedInLonger = shorter.getWordsCount() < longer.getWordsCount()
                        && longer.getRawKeyword().contains(shorter.getRawKeyword());
                if (containedInLonger) {
                    shorter.incTotalCountBy(longer.getTotalCount());
                }
            }
        }
    }

    /**
     * Final filtering of the keyword map: removes single-word keywords that
     * occur inside a longer keyphrase, and — when typed dependencies are used —
     * keeps only the {@code topx} entries with the highest relevance
     * (PR * tf*idf). The dependency enrichment can construct more than
     * {@code topx} keywords, which is why the extra trim is needed.
     *
     * @param results candidate keywords; not modified
     * @param topx    maximum number of keywords to keep when dependencies are used
     * @return a new map containing the surviving keywords
     */
    private Map<String, Keyword> cleanFinalKeywords(Map<String, Keyword> results, int topx) {
        Map<String, Keyword> cleaned = new HashMap<>(results);
        for (Map.Entry<String, Keyword> candidate : results.entrySet()) {
            Keyword kw = candidate.getValue();
            if (kw.getWordsCount() != 1) {
                continue; // only single-word keywords are ever dropped here
            }
            boolean insideLongerPhrase = results.values().stream()
                    .anyMatch(other -> kw.getWordsCount() < other.getWordsCount()
                            && other.getRawKeyword().contains(kw.getRawKeyword()));
            if (insideLongerPhrase) {
                cleaned.remove(candidate.getKey());
            }
        }

        if (useDependencies) {
            // snapshot the keys to drop first, so we never remove while streaming
            List<String> beyondTopX = cleaned.entrySet().stream()
                    .sorted((a, b) -> Double.compare(b.getValue().getRelevance(), a.getValue().getRelevance()))
                    .skip(topx)
                    .map(Map.Entry::getKey)
                    .collect(Collectors.toList());
            beyondTopX.forEach(cleaned::remove);
        }

        return cleaned;
    }


    /**
     * Fluent builder for {@link TextRank}. Every setting is optional; unset
     * options fall back to the defaults declared below.
     */
    public static class Builder {

        private static final String[] STOP_WORDS = {"new", "old", "large", "big", "vast", "small", "many", "few", "good", "better", "best", "bad", "worse", "worst"};
        private static final String[] ADMITTED_POS = {"NN", "NNS", "NNP", "NNPS", "JJ", "JJR", "JJS"}; // for final keyword selection
        private static final String[] FORBIDDEN_NE = {"NER_Number", "NER_Ordinal", "NER_Percent", "NER_Date", "NER_Duration"}; //"NER_Date", "NER_Duration" // for construction of graph of co-occurrences
        private static final String[] FORBIDDEN_POS = {"CC", "CD", "DT", "EX", "IN", "LS", "MD", "PDT", "PRP", "PRP$", "RP", "RB", "RBR", "RBS", "TO", "UH", "WDT", "WP", "WP$", "WRB"}; // for construction of graph of co-occurrences
        private static final String[] STOP_WORDS_MEDIUM = {"now", "later", "least", "well", "always", "new", "old", "good", "better", "best", "great", "bad", "worse", "worst", "much", "more", "less", "several", "larger", "smaller", "big", "lower", "widely", "highly", "many", "few", "with", "without", "via", "therefore", "furthermore", "whose", "whether", "though", "although", "to", "not", "of", "prior", "instead", "upon", "every", "together", "across", "toward", "towards", "since", "around", "along", "onto", "into", "already", "whilst", "while", "than", "then", "anyway", "whole", "thus", "throughout", "through", "during", "above", "below", "use", "due", "do", "be", "have", "got", "might", "may", "shall", "can", "could", "would", "will", "such", "like", "other", "another", "far", "away"};
        private static final String[] STOP_WORDS_LARGE = {"now", "recently", "late", "later", "lately", "recent", "finally", "often", "always", "new", "old", "novel", "least", "last", "well", "good", "better", "best", "great", "bad", "worse", "worst", "much", "more", "less", "several", "large", "larger", "small", "smaller", "big", "vast", "little", "lower", "long", "short", "wide", "widely", "highly", "many", "few", "with", "without", "via", "therefore", "furthermore", "whose", "whether", "though", "although", "to", "not", "of", "prior", "instead", "upon", "every", "together", "across", "toward", "towards", "since", "around", "along", "onto", "into", "already", "whilst", "while", "than", "then", "anyway", "whole", "thus", "throughout", "through", "during", "above", "below", "use", "due", "do", "be", "have", "got", "make", "might", "may", "shall", "can", "could", "would", "will", "entire", "entirely", "overall", "useful", "usefully", "easy", "easier", "certain", "such", "like", "difficult", "necessary", "unnecessary", "full", "fully", "empty", "successful", "successfully", "unsuccessful", "unsuccessfully", "especially", "usual", "usually", "other", "another", "far", "away"};

        private static final boolean DEFAULT_REMOVE_STOP_WORDS = false;
        private static final boolean DEFAULT_DIRECTION_MATTER = false;
        private static final boolean DEFAULT_RESPECT_SENTENCES = false;
        private static final boolean DEFAULT_USE_TYPED_DEPENDENCIES = true;
        private static final boolean DEFAULT_COOCURRENCES_FROM_DEPENDENCIES = false;
        private static final boolean DEFAULT_CLEAN_KEYWORDS = true;
        private static final int DEFAULT_CO_OCCURRENCE_WINDOW = 2;
        // NOTE(review): float literal keeps the historical rounding (0.33333334...);
        // switching to 1 / 3.0 would slightly change the top-x cutoff.
        private static final double DEFAULT_TAGS_TOPX = 1 / 3.0f;

        private final GraphDatabaseService database;
        private boolean removeStopWords = DEFAULT_REMOVE_STOP_WORDS;
        private boolean directionsMatter = DEFAULT_DIRECTION_MATTER;
        private boolean respectSentences = DEFAULT_RESPECT_SENTENCES;
        private boolean useDependencies = DEFAULT_USE_TYPED_DEPENDENCIES;
        private boolean cooccurrencesFromDependencies = DEFAULT_COOCURRENCES_FROM_DEPENDENCIES;
        private boolean cleanKeywords = DEFAULT_CLEAN_KEYWORDS;
        private double topxTags = DEFAULT_TAGS_TOPX;
        private Label keywordLabel;
        //private Set<String> stopWords = new HashSet<>(Arrays.asList(PARAMETER_STOP_WORDS));
        private Set<String> stopWords = new HashSet<>(Arrays.asList(STOP_WORDS_MEDIUM));
        //private Set<String> stopWords = new HashSet<>(Arrays.asList(STOP_WORDS_LARGE));
        private List<String> admittedPOSs = Arrays.asList(ADMITTED_POS);
        private List<String> forbiddenNEs = Arrays.asList(FORBIDDEN_NE);
        private List<String> forbiddenPOSs = Arrays.asList(FORBIDDEN_POS);


        public Builder(GraphDatabaseService database, DynamicConfiguration configuration) {
            this.database = database;
            this.keywordLabel = configuration.getLabelFor(Labels.Keyword);
        }

        /** Creates a {@link TextRank} instance from the current settings. */
        public TextRank build() {
            TextRank result = new TextRank(database,
                    removeStopWords,
                    directionsMatter,
                    respectSentences,
                    useDependencies,
                    cooccurrencesFromDependencies,
                    cleanKeywords,
                    topxTags,
                    keywordLabel,
                    stopWords,
                    admittedPOSs,
                    forbiddenNEs,
                    forbiddenPOSs);
            return result;
        }

        /**
         * Sets the stop-word list from a comma-separated string and enables
         * stop-word removal. If the list starts with a "+" entry
         * ("+,word1,word2,..."), the words are appended to the current set;
         * otherwise they replace it. Words are trimmed and lower-cased.
         */
        public Builder setStopwords(String stopwords) {
            // split once instead of three times as before
            String[] tokens = stopwords.split(",");
            boolean append = tokens.length > 0 && tokens[0].equals("+");
            // collect into an explicit HashSet so the set stays mutable for a
            // possible later append-mode call (Collectors.toSet() gives no such guarantee)
            Set<String> parsed = Arrays.stream(tokens)
                    .filter(token -> !append || !token.equals("+")) // drop the "+" marker only in append mode
                    .map(token -> token.trim().toLowerCase())
                    .collect(Collectors.toCollection(HashSet::new));
            if (append) {
                this.stopWords.addAll(parsed);
            } else {
                this.stopWords = parsed;
            }
            this.removeStopWords = true;
            return this;
        }

        /** Enables/disables stop-word removal. */
        public Builder removeStopWords(boolean val) {
            this.removeStopWords = val;
            return this;
        }

        /** When true, co-occurrence edges are directional. */
        public Builder respectDirections(boolean val) {
            this.directionsMatter = val;
            return this;
        }

        /** When true, co-occurrences never cross sentence boundaries. */
        public Builder respectSentences(boolean val) {
            this.respectSentences = val;
            return this;
        }

        /** When true, typed dependencies are used for keyword enrichment. */
        public Builder useDependencies(boolean val) {
            this.useDependencies = val;
            return this;
        }

        /** When true, the co-occurrence graph is built from typed dependencies. */
        public Builder useDependenciesForCooccurrences(boolean val) {
            this.cooccurrencesFromDependencies = val;
            return this;
        }

        /** Overrides the admitted POS tags; ignored when null or empty. */
        public Builder setAdmittedPOSs(List<String> admittedPOSs) {
            if (admittedPOSs != null && !admittedPOSs.isEmpty())
                this.admittedPOSs = admittedPOSs;
            return this;
        }

        /** Overrides the forbidden POS tags; ignored when null or empty. */
        public Builder setForbiddenPOSs(List<String> forbiddenPOSs) {
            if (forbiddenPOSs != null && !forbiddenPOSs.isEmpty())
                this.forbiddenPOSs = forbiddenPOSs;
            return this;
        }

        /** Overrides the forbidden named-entity labels; ignored when null or empty. */
        public Builder setForbiddenNEs(List<String> forbiddenNEs) {
            if (forbiddenNEs != null && !forbiddenNEs.isEmpty())
                this.forbiddenNEs = forbiddenNEs;
            return this;
        }

        /** Sets the fraction of top-ranked tags kept as keywords. */
        public Builder setTopXTags(double topXTags) {
            this.topxTags = topXTags;
            return this;
        }

        /** Sets a custom label for keyword nodes. */
        public Builder setKeywordLabel(String keywordLabel) {
            //this.keywordLabel = Labels.valueOf(keywordLabel); // doesn't work because Labels is enum, while we want customizable keyword labels
            this.keywordLabel = Label.label(keywordLabel);
            return this;
        }

        /** Enables/disables the final keyword clean-up pass. */
        public Builder setCleanKeywords(boolean cleanKeywords) {
            this.cleanKeywords = cleanKeywords;
            return this;
        }
    }
}
