package crawler;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map.Entry;

import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpression;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;

import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;

import common.Constants;

public class XmlExtractor {

	/**
	 * Extracts outlink documents from an XML {@link CrawlerDocument}.
	 *
	 * <p>The document content (feature {@code DOC_FEATURE_KEY_CONTENT}) is parsed
	 * and the XPath expression stored under {@code DOC_FEATURE_KEY_XPATH_ROOT}
	 * selects the repeating record nodes. Each selected node is converted into an
	 * outlink document using the per-field XPath map stored under
	 * {@code DOC_FEATURE_KEY_XPATHS}; outlinks that yield a link feature are
	 * attached to {@code doc} via {@code addOutlinkDoc}.
	 *
	 * @param doc document carrying the raw XML content and the extraction
	 *            configuration; receives the extracted outlink documents
	 */
	public void Extract(CrawlerDocument doc){
		String content = (String)doc.getFeature(Constants.DOC_FEATURE_KEY_CONTENT);
		String xpathroot = (String)doc.getFeature(Constants.DOC_FEATURE_KEY_XPATH_ROOT);

		// Without content or a root expression there is nothing to extract.
		if (null == content || null == xpathroot){
			return;
		}

		XPathFactory factory = XPathFactory.newInstance();
		XPath xpath = factory.newXPath();

		NodeList nodelist;
		try {
			XPathExpression expr = xpath.compile(xpathroot);
			// StandardCharsets.UTF_8 is always available, so no checked
			// UnsupportedEncodingException (unlike getBytes("utf-8")).
			// NOTE(review): the default parser behind evaluate(InputSource) may
			// resolve external entities (XXE) -- confirm content is trusted.
			InputStream ins = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8));
			InputSource source = new InputSource(ins);
			source.setEncoding("utf-8");
			nodelist = (NodeList)expr.evaluate(source, XPathConstants.NODESET);
		} catch (XPathExpressionException e) {
			// TODO Add log here
			e.printStackTrace();
			// Was: fell through with a null nodelist and NPE'd in the loop below.
			return;
		}

		@SuppressWarnings("unchecked")
		HashMap<String, String> xpaths = (HashMap<String, String>)
			doc.getFeature(Constants.DOC_FEATURE_KEY_XPATHS);
		// No field configuration means no features can be extracted.
		if (null == xpaths){
			return;
		}

		for (int i = 0; i < nodelist.getLength(); ++i){
			CrawlerDocument outlink = ExtractOutlink(nodelist.item(i), xpaths);
			// ExtractOutlink returns null when extraction fails; skip such nodes
			// (was dereferenced unconditionally before).
			if (null == outlink){
				continue;
			}
			String url = (String)outlink.getFeature(Constants.DOC_FEATURE_KEY_LINK);
			if (null != url){
				try {
					outlink.setUrl(url);
					doc.addOutlinkDoc(outlink);
				} catch (MalformedURLException e) {
					// TODO Add log here
					e.printStackTrace();
				}
			}
		}
	}

	/**
	 * Builds one outlink document from a record node by walking its subtree and
	 * collecting the features configured in {@code xpaths}.
	 *
	 * @param node   root node of one record selected by the root XPath
	 * @param xpaths feature key -&gt; relative xpath configuration
	 * @return the populated document, or {@code null} when {@code node} is
	 *         {@code null}
	 */
	private CrawlerDocument ExtractOutlink(Node node, HashMap<String, String> xpaths){

		CrawlerDocument doc = new CrawlerDocument();

		String rootname = node.getNodeName();
		boolean success = RecursiveExtract(rootname, node, xpaths, doc, "");

		return success ? doc : null;
	}

	/**
	 * Recursively walks the subtree rooted at {@code node}, adding to {@code doc}
	 * the value of every node whose accumulated slash-delimited name path matches
	 * a configured xpath (compared case-insensitively).
	 *
	 * @param rootname name of the record root, prepended to each configured
	 *                 (relative) xpath before comparison
	 * @param node     current node; {@code null} terminates this branch
	 * @param xpaths   feature key -&gt; relative xpath configuration
	 * @param doc      target document receiving matched features
	 * @param path     slash-delimited name path accumulated from the record root
	 * @return {@code false} only when {@code node} is {@code null}
	 */
	private boolean RecursiveExtract(String rootname, Node node,
			HashMap<String, String> xpaths, CrawlerDocument doc, String path){

		if (null == node){
			return false;
		}

		path += node.getNodeName() + "/";

		for (Entry<String, String> element : xpaths.entrySet()){
			// Normalize the configured xpath to "rootname/.../" form so it is
			// directly comparable with the accumulated node-name path.
			String xpath = rootname + "/" + element.getValue();
			if (!xpath.endsWith("/")){
				xpath += "/";
			}

			if (path.equalsIgnoreCase(xpath)){
				// NOTE(review): getNodeValue() is null for Element nodes; this
				// presumably expects configured paths to end at text nodes
				// ("#text") -- confirm against the xpath configuration.
				doc.addFeature(element.getKey(), node.getNodeValue());
			}
		}

		NodeList children = node.getChildNodes();
		if (children != null){
			for (int i = 0; i < children.getLength(); ++i){
				RecursiveExtract(rootname, children.item(i), xpaths, doc, path);
			}
		}

		return true;
	}

}
