package my.crawler.impl;


import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.StringReader;
import java.util.List;

import javax.xml.namespace.QName;
import javax.xml.xpath.*;

import org.cyberneko.html.parsers.DOMParser;
import org.w3c.dom.*;
import org.xml.sax.*;

import my.crawler.Crawler;
import my.crawler.CrawlerTrainer;
import my.crawler.data.ObjectData;

/**
 * Trainer/crawler implementation backed by a NekoHTML DOM parse of the
 * current page.
 *
 * Algorithm outline:
 * 1. Set the current page ({@link #setCurrentPage}).
 *    1.1. Build the DOM tree from the page content.
 * 2. Add a tag ({@link #addTag}).
 *    2.1. Locate the node whose text content matches the sample text.
 *
 * @author Root
 */
public class CrawlerTrainerImpl implements CrawlerTrainer, Crawler {
	
	// Raw page markup of the current page, as passed to setCurrentPage.
	private String content;
	// DOM built from `content` by the NekoHTML parser; null until
	// setCurrentPage has been called.
	private Document doc; 
	DOMParser parser = new DOMParser();
	
	/**
	 * Placeholder custom XPath function; currently always evaluates to
	 * {@code Boolean.FALSE}.
	 */
	public static class MyFunc implements XPathFunction {

		/**
		 * @param args the function-call arguments (currently ignored)
		 * @return always {@code Boolean.FALSE}
		 */
		public Object evaluate(List args) throws XPathFunctionException {
			// Explicit boxed constant: XPathFunction must return an Object.
			return Boolean.FALSE;
		}
		
	}

	/**
	 * Resolves the custom XPath function "my-func" to {@link MyFunc};
	 * every other name resolves to {@code null} (unknown function).
	 */
	static class FunctionContext implements XPathFunctionResolver {

		public XPathFunction resolveFunction(QName functionName, int arity) {
			// Match on the local part only, ignoring namespace and arity.
			if (functionName.getLocalPart().equals("my-func")) {
				return new MyFunc();
			}
			return null;
		}
	}

	/**
	 * Locates the first element of the current document whose string value
	 * contains {@code text}. The {@code path}/{@code tag} association is
	 * not implemented yet (training logic TODO).
	 *
	 * @param text sample text to search for in the current page
	 * @param path logical path to associate with the match (unused so far)
	 * @param tag  tag name to associate with the match (unused so far)
	 * @throws Exception if the XPath expression cannot be compiled or
	 *         evaluated (setCurrentPage must have been called first)
	 */
	public void addTag(String text, String path, String tag) throws Exception {
		XPathFactory factory = XPathFactory.newInstance();
		XPath xpath = factory.newXPath();
		xpath.setXPathFunctionResolver(new FunctionContext());
		// Bind the search text as an XPath variable instead of splicing it
		// into the expression source: a quote character in `text` would
		// otherwise break (or inject into) the compiled XPath.
		final String searchText = text;
		xpath.setXPathVariableResolver(new XPathVariableResolver() {
			public Object resolveVariable(QName variableName) {
				if (variableName.getLocalPart().equals("text")) {
					return searchText;
				}
				return null;
			}
		});
		// `contains(self, ...)` tested a child element literally named
		// "self"; `contains(., ...)` tests the string value of the context
		// node itself, which is what the search intends.
		XPathExpression expr = xpath.compile("//*[contains(., $text)]");
		Node node = (Node) expr.evaluate(doc, XPathConstants.NODE);
		// TODO: attach `tag` at `path` relative to `node` — training logic
		// is not implemented yet.
	}

	/**
	 * @return this instance, which already implements {@link Crawler}
	 */
	public Crawler buildCrawler() {
		return this;
	}

	/**
	 * Sets the page the trainer works on and parses it into a DOM tree.
	 *
	 * @param content raw page markup
	 * @param url     source URL of the page (currently unused)
	 * @throws Exception if the parser rejects the content
	 */
	public void setCurrentPage(String content, String url) throws Exception {
		this.content = content;
		// Feed the parser characters directly via a StringReader: the old
		// content.getBytes() round-trip used the platform-default charset
		// and could corrupt non-ASCII pages.
		parser.parse(new InputSource(new StringReader(content)));
		doc = parser.getDocument();
	}

	/**
	 * Not implemented yet.
	 *
	 * @return currently always {@code null}
	 */
	public List<ObjectData> crawl() {
		// TODO implement crawling over the trained tags.
		return null;
	}

}
