package sublinear.model.db;

import java.io.File;
import java.io.IOException;
import java.sql.SQLException;
import java.util.HashSet;
import java.util.Vector;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.xml.parsers.ParserConfigurationException;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;

import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.XMLReader;
import org.xml.sax.helpers.DefaultHandler;

import sublinear.model.db.DbGraphModel.Article;
import sublinear.model.db.DbGraphModel.Redirect;


/**
 * Specialized XML based parser for the offline Wikipedia XML dump.
 * Creates a database containing a graph representation of the data, made of
 * articles (= vertices) and internal links (= edges)
 * 
 * @author itay
 *
 */
public class OfflineWikipediaParser extends DefaultHandler {
	
	private static final Logger log = Logger.getLogger( OfflineWikipediaParser.class.getName() );
	
	// Current parsing step. The dump is scanned once per phase - see doParse()
	private enum Phase {
		Articles,
		Redirects,
		Links
	}
	
	// Internal state indicators - which elements the cursor is currently inside
	private boolean inPage;
	private boolean inTitle;
	private boolean inId;
	private boolean inRevision;
	private boolean inText;
	private Phase phase;
	
	// Values accumulated for the page currently being parsed
	private String title;
	private String redirect;
	private long id;
	// Accumulates title / article text content across characters() callbacks.
	// NOT thread safe, but faster
	private StringBuilder internalStringBuffer;
	// Accumulates the page ID digits. The SAX contract allows element text to
	// be delivered in several characters() chunks, so the ID must be buffered
	// and only parsed once its element is closed
	private StringBuilder idStringBuffer;
	
	// Parsing properties
	/** 
	 * Create links that their destination is unresolved. This could cause
	 * a single "unresolved" node with a very high degree
	 */
	private boolean createLinksToUnresolvedDestinations;
	/**
	 * Make sure each (ID => Destination) pair is unique
	 */
	private boolean uniqueLinks;
	
	// Model
	private DbGraphModel model;
	// Filter
	static final ArticlesFilter filter = new ArticlesFilter();
	
	/** 
	 * Regular expression used to parse MediaWiki links out of a given text.
	 * Taken from: http://stackoverflow.com/a/809858/397604, with a few modifications
	 */
    private static final Pattern mediaWikiLinksPattern = Pattern.compile("\\[\\[([\\w #]+)(?:\\|[\\w ]+)?\\]\\]");
	
	/**
	 * Creates a parser that inserts the parsed graph into the given model.
	 * 
	 * @param model Destination database model for articles, redirects and links
	 */
	public OfflineWikipediaParser(DbGraphModel model) {
		inPage = false;
		inTitle = false;
		inId = false;
		inRevision = false;
		inText = false;
		
		this.model = model;
		
		title = null;
		redirect = null;
		id = 0;
		internalStringBuffer = new StringBuilder();
		idStringBuffer = new StringBuilder();
		
		createLinksToUnresolvedDestinations = false;
		uniqueLinks = true;
	}

	@Override
	public void startDocument() throws SAXException {

	}

	@Override
	public void endDocument() throws SAXException {

	}	

	/**
	 * Tracks entry into the elements of interest. The redirect destination is
	 * carried as an attribute, so it is captured here rather than in
	 * characters().
	 */
	@Override
	public void startElement(String namespaceURI,
			String localName,
			String qName, 
			Attributes atts)
					throws SAXException {
		
		if (localName.equals("page")) {
			// Starting a new page element
			inPage = true;
		} else if (localName.equals("title")) {
			inTitle = true;	
		} else if (localName.equals("id")) {
			inId = true;
		} else if (localName.equals("redirect")) {
			// The destination title is an attribute of the (empty) redirect element
			redirect = atts.getValue("title");
		} else if (localName.equals("revision")) {
			inRevision = true;
		} else if (localName.equals("text")) {
			inText = true;
		}

	}
	
	/**
	 * Performs the per-page work once a page element closes, according to the
	 * current phase: inserts the article, the redirect, or the outgoing links.
	 */
	@Override
	public void endElement(String uri, String localName, String qName)
					throws SAXException {
				
		if (localName.equals("page")) {
			// Page element done - now do the work according to the phase
			inPage = false;
			if (phase == Phase.Articles) {
				try {
					Article article = new Article(id, title);
					if (filter.accept(article)) {
						log.finer("Adding article: " + title + " (" + id + ")");
						// Reuse the article instance already built for the filter check
						model.insertArticle(article);
					} else {
						log.finer("Filtered article: " + title + " (" + id + ")");
					}
				} catch (SQLException e) {
					log.log(Level.WARNING, "Exception while trying to insert article", e);
				}
			} else if (phase == Phase.Redirects) {
				if (redirect != null) {
					try {
						long destId = model.getArticleByTitle(redirect);
						if (createLinksToUnresolvedDestinations || destId != DbGraphModel.UNRESOLVED) {
							model.insertRedirect(new Redirect(id, destId));
							log.finer("Adding redirect: " + title + " (" + id + ") => " + redirect + " (" + destId + ")");
						} else {
							log.finer("Ignoring unresolved redirect: " + title + " (" + id + ") => " + redirect);
						}
					} catch (SQLException e) {
						log.log(Level.WARNING, "Exception while trying to insert redirect", e);
					}					
				}
			} else if (phase == Phase.Links) {
				log.finer("Parsing article " + title + " for links...");
				if (redirect != null) {
					log.fine("This is a redirection article, skip parsing");
				} else if (!filter.accept(new Article(id, title))) {
					log.fine("This is a filtered article, skip parsing");
				} else {
					Vector<String> outgoingLinks = getLinksFromText(internalStringBuffer.toString());
					HashSet<Long> outgoingLinkSet = new HashSet<Long>();
					log.finest("Found " + outgoingLinks.size() + " links");
					
					for (String destArticle : outgoingLinks) {
						try {
							long destId = model.resolveArticleByTitle(destArticle);
							// add() returns false when the destination was already
							// seen for this page - skip duplicates when requested
							if (uniqueLinks && !outgoingLinkSet.add(destId))
								continue;
	
							if (createLinksToUnresolvedDestinations || destId != DbGraphModel.UNRESOLVED) {
								model.insertLink(new DbGraphModel.Link(id, destId));
								log.finer("Added link: " + title + " (" + id + ") => " + destArticle + " (" + destId + ")");
							}
						} catch (SQLException e) {
							log.log(Level.WARNING, "Exception while trying to insert link", e);
						}											
					}
				}
				
			}
			redirect = null;
			internalStringBuffer.setLength(0);
		} else if (localName.equals("title")) {
			inTitle = false;
			title = internalStringBuffer.toString();
			internalStringBuffer.setLength(0);
		} else if (localName.equals("id")) {
			inId = false;
			// Only the page-level ID was buffered; revision / contributor IDs
			// are never appended in characters(). Parsing here (instead of per
			// characters() chunk) is correct even when SAX splits the text.
			if (!inRevision && idStringBuffer.length() > 0) {
				id = Long.parseLong(idStringBuffer.toString().trim());
				idStringBuffer.setLength(0);
			}
		} else if (localName.equals("revision")) {
			inRevision = false;
		} else if (localName.equals("text")) {
			inText = false;
		}
		
	}
	
	/**
	 * Accumulates character data for the element currently being read. A single
	 * element's text may arrive across multiple invocations, so all values are
	 * buffered and finalized in endElement().
	 */
	@Override
	public void characters(char ch[], int start, int length) throws SAXException {
		
		// Ignore anything that is not within a page
		if (!inPage)
			return;
		
		// Article's title
		if (inTitle) {
			// While titles are short, we must parse in parts here as well otherwise
			// we might get in trouble for some titles such as:
			// File:Of Monsters and Men - "King and Lionheart" (Promotional Single).jpg
			internalStringBuffer.append(ch, start, length);
		}
		
		// ID. Notice there are two possible IDs, one inside the revision scope
		// and one before that - we only want the one outside.
		// Buffer rather than parse immediately - this callback may deliver the
		// element text in several chunks
		else if (!inRevision && inId) {
			idStringBuffer.append(ch, start, length);
		}
		
		// Link parsing. This is heavy, so only do that when looking for links
		else if (phase == Phase.Links && inText) {			
			internalStringBuffer.append(ch, start, length);
		}
		
	}
	
	/**
	 * Sets the current parsing phase. Must be called before each pass over the
	 * document (see doParse()).
	 * 
	 * @param p The phase to run on the next parse
	 */
	public void setPhase(Phase p) {
		log.info("Set phase to " + p.name());
		phase = p;
	}
	
	/**
	 * Utility function for extracting internal links out of MediaWiki formatted text.
	 * This is done by a simple pattern matching, with the following expected result:
	 * [[Link]] --> Link
	 * [[Link|Title]] --> Link
	 * [[Link#Section|Title]] --> Link
	 * 
	 * All other text is simply ignored.
	 * 
	 * @param text MediaWiki formatted text
	 * @return Vector containing all link destinations. See description
	 */
	public static Vector<String> getLinksFromText(String text) {
		Vector<String> links = new Vector<String>();
		
		Matcher matcher = mediaWikiLinksPattern.matcher(text);
		while (matcher.find()) {
			String destTitle = matcher.group(1);
			
			// Special case: handle links that contains a section part
			// We will simply drop the section part, treating them as
			// standard links
			int idx = destTitle.indexOf('#');
			if (idx != -1) {
				log.finest("Cutting section part: " + destTitle + " --> " + destTitle.substring(0, idx));
				destTitle = destTitle.substring(0, idx);
			}
			
			log.finest("Found link: " + destTitle);
			links.add(destTitle);
		}
		
		return links;
	}

	/**
	 * Converts the input file name argument to URL object, as required by the SAX 
	 * parser.
	 * The file name is converted to absolute path, with '/' as separator char.
	 * 
	 * @param filename String with the file name
	 * @return URL object with the absolute path of the input file
	 */
	private static String convertToFileURL(String filename) {
		String path = new File(filename).getAbsolutePath();
		if (File.separatorChar != '/') {
			path = path.replace(File.separatorChar, '/');
		}

		if (!path.startsWith("/")) {
			path = "/" + path;
		}
		return "file:" + path;
	}	
	
	/**
	 * Best-effort parser feature toggle. Not every SAX implementation supports
	 * every feature, and a missing hardening feature must not abort parsing.
	 * 
	 * @param spf Factory to configure
	 * @param feature Feature URI to set
	 * @param value Desired feature value
	 */
	private static void trySetFeature(SAXParserFactory spf, String feature, boolean value) {
		try {
			spf.setFeature(feature, value);
		} catch (ParserConfigurationException e) {
			log.log(Level.FINE, "XML parser feature not supported: " + feature, e);
		} catch (SAXException e) {
			log.log(Level.FINE, "XML parser feature not supported: " + feature, e);
		}
	}
	
	/**
	 * Parses the given Wikipedia XML dump file and populates the model.
	 * 
	 * @param model Destination database model
	 * @param xmlFileName Path to the Wikipedia XML dump
	 * @throws ParserConfigurationException If the SAX parser cannot be created
	 * @throws SAXException On XML parsing errors
	 * @throws IOException On file access errors
	 * @throws SQLException On database errors during flush / index creation
	 */
	public static void doParse(DbGraphModel model, String xmlFileName) 
			throws ParserConfigurationException, SAXException, IOException, SQLException {
		
		/**
		 * Here we are running the actual work of parsing the XML document and creating a database made
		 * of articles and internal links.
		 * We are running multiple times through the document:
		 * 1. Create articles table: ID => title
		 * 2. Create redirects table: ID => ID
		 * 3. Create links table: ID => ID 
		 * 
		 * The special ID 0 will be used for missing articles
		 */
		
		log.info("Called for: " + xmlFileName);
		
		SAXParserFactory spf = SAXParserFactory.newInstance();
		spf.setNamespaceAware(true);
		// Harden against XXE: the dump is large external input, and it never
		// legitimately needs DTDs or external entities
		trySetFeature(spf, "http://apache.org/xml/features/disallow-doctype-decl", true);
		trySetFeature(spf, "http://xml.org/sax/features/external-general-entities", false);
		trySetFeature(spf, "http://xml.org/sax/features/external-parameter-entities", false);
		SAXParser saxParser = spf.newSAXParser();

		String xmlFileUrl = convertToFileURL(xmlFileName); 
		log.finest("Resolved URL: " + xmlFileUrl);
		
		/*
		// Create input stream. This will allow working with compressed
		// files as well (will also require adding a few stream handling 
		 // code)
		File xmlFile = new File(xmlFileName);
		InputStream inputStream = new FileInputStream(xmlFile);
		Reader reader = new InputStreamReader(inputStream, "UTF-8");
		 
		InputSource is = new InputSource(reader);
		is.setEncoding("UTF-8");
		*/
		XMLReader xmlReader = saxParser.getXMLReader();
		OfflineWikipediaParser handler = new OfflineWikipediaParser(model); 
		xmlReader.setContentHandler(handler);
		// Pass 1: articles (vertices)
		handler.setPhase(Phase.Articles);
		xmlReader.parse(xmlFileUrl);
		model.flushArticles();
		model.createArticlesIndexes();
		// Pass 2: redirects (require the articles to be resolvable)
		handler.setPhase(Phase.Redirects);
		xmlReader.parse(xmlFileUrl);
		model.flushRedirects();
		model.createRedirectsIndexes();
		// Pass 3: links (edges; require both articles and redirects)
		handler.setPhase(Phase.Links);
		xmlReader.parse(xmlFileUrl);
		model.flushLinks();
		model.createLinksIndexes();
		
	}

}
