package cs429_project;

import cs429_project.clustering.DocVec;

import java.io.IOException;

import java.net.MalformedURLException;
import java.net.URL;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.Locale;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Pattern;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;

import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

import org.xml.sax.SAXException;

/**
 * A library containing various methods that work with Wikipedia pages to
 * convert them into data for use in our algorithm.
 * <p>
 * All methods are static; this class is not instantiable.
 * @author Peter Ferrans
 */
public class WikiPageHandler {

    /** Splits runs of whitespace; compiled once since tokenizing is hot. */
    private static final Pattern WHITESPACE = Pattern.compile("\\s+");

    /** Splits runs of punctuation; compiled once since tokenizing is hot. */
    private static final Pattern PUNCTUATION = Pattern.compile("\\p{Punct}+");

    /** Utility class: prevent instantiation. */
    private WikiPageHandler() {
    }

    /**
     * Given the URL of a Wikipedia XML page, searches the document and
     * extracts the text portion of it, lower-cased.
     * @param wikiPageUrl
     *    URL of the Wikipedia XML page.
     * @return
     *    The lower-cased text extracted from the Wikipedia XML page, or
     *    {@code null} if the page could not be fetched/parsed or contains no
     *    text element.
     * @throws MalformedURLException
     *    Thrown if the URL is not valid.
     */
    public static String getPageText(String wikiPageUrl)
            throws MalformedURLException {
        URL url = new URL(wikiPageUrl);
        Document doc = getDomDocument(url);
        if (doc == null) {
            // Fetch/parse failed; getDomDocument already reported the error.
            return null;
        }
        String text = extractDocumentText(doc);
        if (text == null) {
            // Document lacked the expected page/revision/text structure.
            return null;
        }
        // Locale.ROOT avoids locale-dependent case mapping (e.g. Turkish i).
        return text.toLowerCase(Locale.ROOT);
    }

    /**
     * Given a string of text from a Wikipedia page, returns all of the links
     * to other Wikipedia pages that appear in the text in a bullet list
     * (entries of the form {@code *[[Page]]} or {@code *[[Page|label]]}).
     * @param text
     *    Text to be searched for links.
     * @return
     *    A sorted set of all the links found in the Wikipedia page text in a
     *    bullet list.
     */
    public static TreeSet<String> getPagesFromWikiText(String text) {
        TreeSet<String> pages = new TreeSet<String>();
        final String starter = "*[[";
        final String ender = "]]";

        // Scan with a cursor instead of repeatedly re-slicing the string
        // (the original substring-per-link approach was accidentally O(n^2)).
        int searchFrom = 0;
        while (true) {
            int markerIndex = text.indexOf(starter, searchFrom);
            if (markerIndex < 0) {
                break;
            }
            int startIndex = markerIndex + starter.length();
            int endIndex = text.indexOf(ender, startIndex);
            if (endIndex < 0) {
                // Unterminated "*[[" with no closing "]]": stop rather than
                // throwing StringIndexOutOfBoundsException.
                break;
            }
            String pageLink = text.substring(startIndex, endIndex);
            // "[[target|display text]]" links: keep only the target.
            int splitPoint = pageLink.indexOf('|');
            if (splitPoint >= 0) {
                pageLink = pageLink.substring(0, splitPoint);
            }
            pages.add(pageLink);
            searchFrom = endIndex + ender.length();
        }
        return pages;
    }

    /**
     * Given the text of a Wikipedia page, converts the text into a document
     * vector with an ID number of -1.
     * @param text
     *    Text to convert to a document vector.
     * @return
     *    The created document vector.
     */
    public static DocVec docVecFromWikiPageText(String text) {
        String[] tokens = tokenizeText(text).toArray(new String[0]);
        return new DocVec(tokens, -1);
    }

    /**
     * Given a set of Wikipedia page URLs, converts the pages of those URLs to
     * document vectors and returns them.  Pages are assigned sequential
     * negative ID numbers starting at -1.  Pages whose text cannot be
     * retrieved are skipped.
     * @param pageUrls
     *    URLs for Wikipedia pages to convert to document vectors.
     * @return
     *    Wikipedia pages converted to document vectors.
     * @throws MalformedURLException
     *    Thrown if one of the URLs is not properly formed.
     */
    public static TreeSet<DocVec> docVecsFromWikiPages(Set<String> pageUrls)
            throws MalformedURLException {
        TreeSet<DocVec> docSet = new TreeSet<DocVec>();
        Iterator<String> pageIt = pageUrls.iterator();
        int negCounter = -1;

        while (pageIt.hasNext()) {
            String url = pageIt.next();
            String text = getPageText(url);
            if (text == null) {
                // Fetch/parse failure already reported; skip this page so one
                // bad URL does not abort the whole batch.
                continue;
            }
            String[] tokens = tokenizeText(text).toArray(new String[0]);
            docSet.add(new DocVec(tokens, negCounter));
            negCounter = negCounter - 1;
        }

        return docSet;
    }

    /**
     * Given a string of text, converts it to tokens.  Tokens split the string
     * wherever whitespace or punctuation marks occur; empty tokens are
     * discarded.  Tokens are lower-cased.
     * @param text
     *    Text to be tokenized.
     * @return
     *    A list of the tokens.
     */
    public static ArrayList<String> tokenizeText(String text) {
        // Locale.ROOT avoids locale-dependent case mapping (e.g. Turkish i).
        text = text.toLowerCase(Locale.ROOT);
        ArrayList<String> tokenList = new ArrayList<String>();
        for (String chunk : WHITESPACE.split(text)) {
            for (String token : PUNCTUATION.split(chunk)) {
                if (!token.isEmpty()) {
                    tokenList.add(token);
                }
            }
        }

        return tokenList;
    }

    /**
     * Given a URL for an XML document on the web, creates the DOM Document
     * corresponding to that document.  External entity resolution is disabled
     * to protect against XXE attacks, since the XML comes from the network.
     * @param pageURL
     *    URL of the XML document.
     * @return
     *    The DOM Document created from the XML document, or {@code null} if
     *    fetching or parsing failed (the error is reported on stderr).
     * @see org.w3c.dom.Document
     */
    public static Document getDomDocument(URL pageURL) {
        Document doc = null;
        try {
            DocumentBuilderFactory factory =
                    DocumentBuilderFactory.newInstance();
            // XXE hardening: never resolve external entities in remote XML.
            factory.setFeature(
                    "http://xml.org/sax/features/external-general-entities",
                    false);
            factory.setFeature(
                    "http://xml.org/sax/features/external-parameter-entities",
                    false);
            factory.setExpandEntityReferences(false);
            DocumentBuilder builder = factory.newDocumentBuilder();
            // parse(String uri) lets the parser open AND close the stream;
            // the previous openStream() call leaked the connection.
            doc = builder.parse(pageURL.toExternalForm());
        } catch (IOException e) {
            System.err.println(e);
        } catch (ParserConfigurationException e) {
            System.err.println(e);
        } catch (SAXException e) {
            System.err.println(e);
        }

        return doc;
    }

    /**
     * Given a DOM Document created from a Wikipedia XML page, extracts all of
     * the data from the Document that is contained under the XML tag TEXT
     * (nested as page &gt; revision &gt; text).
     * @param doc
     *    The DOM Document containing the text to be extracted.
     * @return
     *    The text extracted from the Document, or {@code null} if any of the
     *    expected page/revision/text elements is missing.
     * @see org.w3c.dom.Document
     */
    public static String extractDocumentText(Document doc) {
        Element rootElem = doc.getDocumentElement();
        Element pageElem = getSingleElement(rootElem, Tags.PAGE);
        if (pageElem == null) {
            return null;
        }
        Element revisionElem = getSingleElement(pageElem, Tags.REVISION);
        if (revisionElem == null) {
            // Previously fell through and threw NullPointerException.
            return null;
        }
        Element textElem = getSingleElement(revisionElem, Tags.TEXT);
        if (textElem == null) {
            return null;
        }
        return textElem.getTextContent();
    }

    /**
     * Given a DOM Element, extracts the first descendant Element with the
     * given tag and returns it to the user.
     * @param parentElem
     *    Element containing the child Element to be extracted; may be
     *    {@code null}.
     * @param tag
     *    The name of the tag specifying the Element to be extracted.
     * @return
     *    The Element that was extracted, or {@code null} if the parent is
     *    {@code null} or no such element exists.
     * @see org.w3c.dom.Element
     */
    private static Element getSingleElement(Element parentElem, String tag) {
        if (parentElem == null) {
            return null;
        }
        NodeList list = parentElem.getElementsByTagName(tag);
        // item(0) returns null when the list is empty; callers handle null.
        return (Element) list.item(0);
    }

    /**
     * Names of XML tags that we need to extract the text from Wikipedia XML
     * documents.  Static nested class: no hidden reference to an enclosing
     * instance is needed for a pure constant holder.
     */
    public static class Tags {

        /** The "page" tag. */
        public static final String PAGE = "page";
        /** The "revision" tag. */
        public static final String REVISION = "revision";
        /** The "text" tag. */
        public static final String TEXT = "text";
    }
}
