package de.dopichaj.labrador.index;


import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

import de.dopichaj.labrador.index.meta.DocumentMetaDataWriter;
import de.dopichaj.labrador.index.selector.AllSelector;
import de.dopichaj.labrador.index.selector.IndexSelector;
import de.dopichaj.labrador.util.Configuration;


/**
 * Add DOM documents to an index.
 *
 * <p>Each document is first normalized ({@link #normalize(Document, Element)}
 * removes every node that is neither an element nor a text node, and
 * optionally resolves "collisions" on element boundaries), then traversed
 * recursively. For every element a digest event is written to the
 * {@link DocumentMetaDataWriter}, and elements accepted by the
 * {@link IndexSelector} are handed to the {@link IndexAdder}.</p>
 */
public final class DocumentAdder {
    
    private static final Logger log = Logger.getLogger(DocumentAdder.class);

    /** Receives the tokens of each element that is to be indexed. */
    private final IndexAdder indexAdder;

    /** Splits text node content into tokens. */
    private final Tokenizer tokenizer;

    /** Maps tag names to the tag objects used in the digest output. */
    private final TagFactory tagFactory;

    /** Receives the structural digest (element open/close and text events). */
    private final DocumentMetaDataWriter digestOutput;

    /** Decides which elements are eligible for indexing. */
    private final IndexSelector selector;
    
    /**
     * A flag whether to handle collisions on element boundaries (for example,
     * he&lt;i&gt;llo&lt;/i&gt;).
     * @see #nodesCollide(Node, Node)
     */
    private final boolean handleCollision;
    
    /**
     * Stop indexing at insanely deep nesting levels.
     */
    private static final int MAX_DEPTH = 100;
    
    /**
     * The maximum length in tokens for an element to count as "inline";
     * inline children contribute their tokens to the enclosing element.
     * 0 disables this (no element is treated as inline).
     */
    private final int maxInlineLength;

    /**
     * If <code>true</code>, an element's token list contains only its own
     * text plus its inline children's tokens; otherwise it contains the
     * tokens of all descendants.
     */
    private final boolean differenceBased;

    /**
     * If <code>true</code>, one digest text event is written per text node;
     * otherwise a single aggregated text event is written per element.
     */
    private final boolean detailedDigest = true;
    
    /**
     * The total length (in tokens) of all elements that were indexed.
     */
    private int lengthSum = 0;

    /** The number of elements that passed the indexing criteria. */
    private int elementCount = 0;

    /** The number of documents added via {@link #addDocument(Document)}. */
    private int documentCount = 0;
    

    /**
     * Constructs a new DocumentAdder.
     * 
     * @param indexAdder
     *          receives the tokens of each element to be indexed.
     * @param tokenizer
     *          splits text node content into tokens.
     * @param digestOutput
     *          receives the structural digest of each document.
     * @param tagFactory
     *          maps tag names to the tags used in the digest output.
     * @param selector
     *          decides which elements are eligible for indexing.
     * @param handleCollision
     *          a flag whether to handle collisions on element boundaries
     *          (for example, he&lt;i&gt;llo&lt;/i&gt;).
     * @param maxInlineLength
     *          the maximum length in tokens, excluding stopwords, of an element
     *          to be considered an inline element; this is used for static
     *          patterns; set to 0 to disable static patterns.
     * @param differenceBased
     *          if <code>true</code>, each element is indexed with only its
     *          own text and inline-child tokens instead of all descendant
     *          tokens.
     */
    public DocumentAdder(IndexAdder indexAdder, Tokenizer tokenizer,
        DocumentMetaDataWriter digestOutput, TagFactory tagFactory,
        IndexSelector selector, boolean handleCollision, int maxInlineLength,
        boolean differenceBased) {
        
        this.indexAdder = indexAdder;
        this.tokenizer = tokenizer;
        this.digestOutput = digestOutput;
        this.tagFactory = tagFactory;
        this.selector = selector;
        this.handleCollision = handleCollision;
        this.maxInlineLength = maxInlineLength;
        this.differenceBased = differenceBased;
        log.info("maxInlineLength: " + maxInlineLength);
        log.info("differenceBased: " + differenceBased);
    }

    /**
     * Constructs a new DocumentAdder, reading the tuning flags from the
     * given configuration: <code>handle-collision</code> (default false),
     * <code>max-inline-length</code> (default 0) and
     * <code>difference-based</code> (default false).
     */
    public DocumentAdder(final IndexAdder indexAdder, final Tokenizer tokenizer,
        final DocumentMetaDataWriter digestOutput, final TagFactory tagFactory,
        final IndexSelector selector, final Configuration configuration) {
        
        this(indexAdder, tokenizer, digestOutput, tagFactory, selector,
            configuration.getBool("handle-collision", false),
            configuration.getInt("max-inline-length", 0),
            configuration.getBool("difference-based", false));
            
    }
    
    /**
     * Constructs a new DocumentAdder that considers all elements for
     * indexing ({@link AllSelector}), handles boundary collisions, disables
     * static patterns (maxInlineLength 0) and is not difference-based.
     */
    public DocumentAdder(IndexAdder indexAdder, Tokenizer tokenizer,
        DocumentMetaDataWriter digestOutput, TagFactory tagFactory) {
        this(indexAdder, tokenizer, digestOutput, tagFactory, new AllSelector(),
            true, 0, false);
    }

    /**
     * Add a DOM document to the index. Note that the given document is
     * normalized before processing: everything except element and text
     * nodes is removed, boundary collisions are resolved (if enabled), and
     * adjacent text nodes are merged.
     * @param document the document that shall be added to the index.
     * @throws IOException if the IndexAdder throws it when adding.
     */
    public void addDocument(Document document) throws IOException {
        
        documentCount++;
        
        Element root = document.getDocumentElement();
        root.normalize();
        normalize(document, root);
        // merge the text nodes created by normalize() with their neighbors
        root.normalize();
        addElementRecursive(new ArrayList<String>(), "", root, 1, 0, 0);
    }
    
    /**
     * Recursively add <code>element</code> and its descendants to the index,
     * writing digest events for every element visited.
     *
     * @param tokens the list where this element's tokens will be stored;
     *          in difference-based mode only this element's own text and its
     *          inline children's tokens are added, otherwise all descendant
     *          tokens.
     * @param parentXPath the XPath of the parent element ("" for the root).
     * @param element the element to process.
     * @param xPathPos the 1-based position of this element among its
     *          same-named siblings (the "[n]" part of the XPath step).
     * @param contentPos the token offset of this element in the document.
     * @param depth the current nesting depth; indexing (but not digest
     *          output) stops at {@link #MAX_DEPTH}.
     * @return the real length of the element, i.e. the token count of all
     *         text contained in it and its descendants.
     * @throws IOException if the IndexAdder throws it when adding.
     */
    private int addElementRecursive(List<String> tokens,
        String parentXPath, Element element,
        int xPathPos, int contentPos, int depth) throws IOException {

        // reserve this element's ID before the children consume further IDs
        final int currentId = digestOutput.getId();
        
        // aggregated text size, only used when detailedDigest is false
        int textSize = 0;
        
        // construct this node's XPath
        final String xPath = parentXPath +
            "/" + element.getTagName() + "[" + xPathPos + "]";
        digestOutput.openElement(tagFactory.getTag(element.getTagName()));

        // process the children
        NodeList children = element.getChildNodes();
        // per-tag-name sibling counter for the XPath "[n]" positions
        Map<String, Integer> posCount = new HashMap<String, Integer>();
        int childContentPos = contentPos;
        boolean lastWasText = false;
        // tokens of children short enough to count as inline elements
        List<String> inlineTokens = new ArrayList<String>();
        // token count of all descendant text, regardless of indexing mode
        int realLength = 0;
        for (int i = 0; i < children.getLength(); i++) {
            Node node = children.item(i);

            // if the child is an element, add it to the index
            if (node.getNodeType() == Node.ELEMENT_NODE) {
                
                // ... determine the XPath position
                final Element child = (Element)node;
                final String tag = child.getTagName();
                int childPos;
                if (posCount.containsKey(tag)) {
                    childPos = posCount.get(tag);
                } else {
                    childPos = 1;
                }
                posCount.put(tag, childPos + 1);
                
                // ... add the element to the index
                List<String> childTokens = new ArrayList<String>();
                final int childLength =
                    addElementRecursive(childTokens, xPath, child,
                    childPos, childContentPos, depth + 1);
                // in difference-based mode the parent does not inherit the
                // child's tokens (except via inlineTokens below)
                if (!differenceBased) {
                    tokens.addAll(childTokens);
                }
                realLength += childLength;
                
                // ... if it is an inline child, add its tokens to the inline
                // tokens
                if (childLength <= maxInlineLength) {
                    inlineTokens.addAll(childTokens);
                }
                
                lastWasText = false;
            
            // if it is a text node, get its tokens
            } else if (node.getNodeType() == Node.TEXT_NODE) {
                // adjacent text nodes should have been merged in addDocument
                assert !lastWasText : "DOM not normalized";
                List<String> textTokens = tokenizer.tokenize(node.getTextContent());
                tokens.addAll(textTokens);
                lastWasText = true;
                realLength += textTokens.size();
                
                if (detailedDigest) {
                    digestOutput.text(textTokens.size());
                } else {
                    textSize += textTokens.size();
                }
            }
            
            // update the next child's position
            // NOTE(review): in difference-based mode tokens excludes the
            // non-inline child tokens, so this offset differs from the one
            // computed in the default mode — confirm this is intended
            childContentPos = contentPos + tokens.size();
        }

        // add this node
        if (differenceBased) {
            tokens.addAll(inlineTokens);
        }
        final int length = tokens.size();
        // index only selected elements that are longer than an inline
        // element and not insanely deeply nested
        final boolean doIndex = selector.doIndex(element) && realLength > maxInlineLength
                    && depth < MAX_DEPTH;
        if (doIndex) {
            lengthSum += realLength;
            elementCount++;
        }

        if (doIndex && length > 0) {
            
            // NOTE(review): in difference-based mode inlineTokens were
            // already appended to tokens above, so they appear twice in
            // newTokens — confirm whether this double-weighting is intended
            final List<String> newTokens = new ArrayList<String>(tokens);
            newTokens.addAll(inlineTokens);
            
            indexAdder.addToIndex(currentId, xPath, newTokens,
                realLength, contentPos);
        }
        if (!detailedDigest) {
            // write one aggregated text event for the whole element
            digestOutput.text(textSize);
        }
        digestOutput.closeElement();
        return realLength;
    }

    /**
     * Normalize the document, starting at <code>parent</code>.
     * Removes all nodes that are not element or text nodes and handles
     * collisions. Colliding elements are replaced by plain text nodes;
     * callers should invoke {@link Node#normalize()} afterwards to merge
     * the resulting adjacent text nodes (see {@link #addDocument(Document)}).
     */
    private void normalize(Document document, Element parent) {
        
        // determine if this node is a data node
        boolean isDataNode = isDataNode(parent);
        
        // for each child
        Node child = parent.getFirstChild();
        while (child != null) {
            
            // only look at elements
            if (child.getNodeType() == Node.ELEMENT_NODE) {
                
                // if the parent is not a data node and there is a collision
                if (!isDataNode &&
                    (nodesCollide(child.getPreviousSibling(), child) ||
                        nodesCollide(child, child.getNextSibling()))) {
                    
                    // merge the child's text with the parent and remove the node
                    Node oldChild = child;
                    child = document.createTextNode(child.getTextContent());
                    parent.replaceChild(child, oldChild);
                
                // otherwise recurse
                } else {
                    normalize(document, (Element)child);
                }
                child = child.getNextSibling();
                
            // remove everything except element and text nodes
            } else if (child.getNodeType() != Node.TEXT_NODE) {

                // advance before removing, or we lose the iteration anchor
                final Node oldChild = child;
                child = child.getNextSibling();
                parent.removeChild(oldChild);

            } else {

                child = child.getNextSibling();
            }
        }
    }
    
    /**
     * Determine if the given node is a data node.
     * A data node is a node whose textual children consist of whitespace
     * only. Note that a node with no text children at all is also
     * considered a data node.
     */
    private boolean isDataNode(Node node) {
        
        boolean result = true;
        NodeList children = node.getChildNodes();
        for (int i = 0; i < children.getLength(); i++) {
            
            Node child = children.item(i);
            if (child.getNodeType() == Node.TEXT_NODE &&
                !StringUtils.isBlank(child.getTextContent())) {
                result = false;
            }
        }
        
        return result;
    }

    /**
     * Determine whether the given nodes collide. If {@link #handleCollision} is
     * set to <code>false</code>, always returns false. Otherwise, the nodes
     * collide if the left nodes ends with and the right node starts with a
     * letter. Either left or right may be null, in which case the result is
     * <code>false</code>.
     */
    private boolean nodesCollide(Node left, Node right) {
        return handleCollision && endsWithText(left) && startsWithText(right);
    }
    
    /**
     * Determine whether the node's text content ends with a letter
     * (<code>false</code> for null nodes and empty text).
     */
    private boolean endsWithText(Node node) {
        if (node == null) {
            return false;
        } else {
            String text = node.getTextContent();
            return text.length() > 0 &&
                Character.isLetter(text.codePointBefore(text.length()));
        }
    }

    /**
     * Determine whether the node's text content starts with a letter
     * (<code>false</code> for null nodes and empty text).
     */
    private boolean startsWithText(Node node) {
        if (node == null) {
            return false;
        } else {
            String text = node.getTextContent();
            return text.length() > 0 &&
                Character.isLetter(text.codePointAt(0));
        }
    }

    /** @return the number of documents added so far. */
    public int getDocumentCount() {
        return documentCount;
    }

    /** @return the number of elements that were indexed. */
    public int getElementCount() {
        return elementCount;
    }

    /** @return the total token length of all indexed elements. */
    public int getLengthSum() {
        return lengthSum;
    }
}
/*
Copyright (c) 2007 Philipp Dopichaj

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/