package com.vs.crawl.news;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.regex.Pattern;

import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;

import org.htmlcleaner.CleanerProperties;
import org.htmlcleaner.DomSerializer;
import org.htmlcleaner.HtmlCleaner;
import org.htmlcleaner.TagNode;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.NodeList;

import cn.edu.hfut.dmic.webcollector.crawler.DeepCrawler;
import cn.edu.hfut.dmic.webcollector.fetcher.Fetcher.FetcherThread;
import cn.edu.hfut.dmic.webcollector.model.Links;
import cn.edu.hfut.dmic.webcollector.model.Page;

import com.html.ParseW3CNodeArrayNoPattern;
import com.model.xmlElement.input.Target;
/**
 * *********************************************
 * @author Administrator
 * @FileName SeedsCrawler.java
 * @Description Seed collector: crawls the configured base URLs and gathers
 *              seed links (optionally restricted by XPath and URL patterns).
 **********************************************
 */
public class SeedsCrawler extends DeepCrawler{
	public static final Logger LOG = LoggerFactory.getLogger(SeedsCrawler.class);
	
	/** Crawl-target configuration (base URLs, URL patterns, optional XPath) read from the input XML. */
	private Target target;
	/** When true this is a trial crawl: stop once 10 seed URLs have been collected. */
	private boolean tryCrawler;
	
	/**
	 * Creates a seed crawler driven by the data-column XML configuration.
	 *
	 * @param crawlPath   working directory for crawl state (passed to {@link DeepCrawler})
	 * @param xmlFileName data-column XML configuration file name (passed to {@link DeepCrawler})
	 * @param tryCrawler  {@code true} for a trial crawl limited to 10 seeds
	 */
	public SeedsCrawler(String crawlPath, String xmlFileName, boolean tryCrawler) {
		super(crawlPath, xmlFileName);
		this.target = this.getDataColumns().getInput().getTarget();
		this.tryCrawler = tryCrawler;
	}
	
	/**
	 * Evaluates {@code xpath} against the page and rebuilds a Jsoup document that
	 * contains only the matched fragments.
	 * Pipeline: Jsoup HTML -> HtmlCleaner -> W3C DOM -> XPath node set -> HTML string -> Jsoup.
	 *
	 * @param xpath    XPath expression selecting the region(s) of interest
	 * @param document full page as parsed by Jsoup
	 * @return a new document built from the concatenated HTML of all matched nodes
	 *         (empty document when nothing matches)
	 * @throws ParserConfigurationException if the W3C DOM cannot be created
	 * @throws XPathExpressionException     if {@code xpath} is invalid
	 */
	private Document getXpathDocumentByXpath(String xpath, Document document) throws ParserConfigurationException, XPathExpressionException{
		StringBuilder matchedHtml = new StringBuilder();
		LOG.info("___________________________________________________________________");
		HtmlCleaner htmlCleaner = new HtmlCleaner();
		DomSerializer domSerializer = new DomSerializer(new CleanerProperties());
		XPath xPath = XPathFactory.newInstance().newXPath();
		TagNode tagNode = htmlCleaner.clean(document.html());
		org.w3c.dom.Document dom = domSerializer.createDOM(tagNode);
		Object result = xPath.evaluate(xpath, dom, XPathConstants.NODESET);
		if (result instanceof NodeList) {
			NodeList nodeList = (NodeList) result;
			int resultLength = nodeList.getLength();
			LOG.info("" + resultLength);
			ParseW3CNodeArrayNoPattern parseW3CNodeArray = new ParseW3CNodeArrayNoPattern();
			for (int i = 0; i < resultLength; i++) {
				// Serialize each matched node back to HTML and accumulate.
				String nodeHtml = parseW3CNodeArray.nodeToHtml(nodeList.item(i));
				LOG.info(nodeHtml);
				matchedHtml.append(nodeHtml);
			}
		}
		return Jsoup.parse(matchedHtml.toString());
	}
	
	/**
	 * Visits a fetched page, collects seed URLs whose absolute href matches the
	 * configured patterns, and returns the links to crawl next.
	 * On level 1, if an XPath is configured, link extraction is restricted to the
	 * matched region of the page.
	 */
	@Override
	public Links visitAndGetNextLinks(Page page) {
		/* Return links for recursive crawling. */
		Links nextLinks = new Links();
		
		Document document = page.getDoc();
		int level = page.getLevel();
		
		// An XPath is configured: narrow the first-level page to the matched region.
		String xpath = target.getXpath();
		if(level == 1 && xpath != null && !"".equals(xpath)){
			try {
				document = getXpathDocumentByXpath(xpath, document);
			} catch (XPathExpressionException e) {
				// Fall back to the full page rather than aborting the visit.
				LOG.error("invalid xpath, falling back to full page: " + xpath, e);
			} catch (ParserConfigurationException e) {
				LOG.error("failed to build DOM for xpath filtering", e);
			}
		}
		
		// Compile the '|'-separated URL patterns ONCE instead of re-splitting and
		// re-compiling for every anchor on the page.
		Pattern[] urlPatterns = null;
		String patten = target.getPatten();
		if(patten != null && !"".equals(patten)){
			String[] parts = patten.split("\\|");
			urlPatterns = new Pattern[parts.length];
			for(int i = 0; i < parts.length; i++){
				urlPatterns[i] = Pattern.compile(parts[i]);
			}
		}
		
		// Collect absolute anchor URLs as seeds; keep only those matching a pattern
		// when patterns are configured, otherwise keep all.
		for(Element a : document.select("a")){
			String url = a.absUrl("href");
			if(url == null || "".equals(url)){
				continue;
			}
			if(urlPatterns == null){
				seeds.add(url);
			}else{
				for(int i = 0; i < urlPatterns.length; i++){
					if(urlPatterns[i].matcher(url).matches()){
						seeds.add(url);
						break;
					}
				}
			}
		}
		
		// Trial crawl: keep only the first 10 collected seeds and stop fetching.
		if(tryCrawler){
			synchronized (seeds) {
				// The base URLs are entry points, not results - remove them.
				for(String baseUrl : target.getBaseUrl().split(",")){
					seeds.remove(baseUrl);
				}
				// Enough seeds collected for the trial.
				if(seeds.size() >= 10){
					this.setOverCrawlerSeed(true);
					synchronized (this.getFetcher()) {
						// NOTE(review): reassigning `seeds` while synchronized on the OLD
						// instance means later synchronized blocks lock a different object;
						// preserved as-is - confirm against DeepCrawler's locking contract.
						List<String> list = new ArrayList<String>(seeds);
						seeds = new HashSet<String>(list.subList(0, 10));
						FetcherThread[] fetcherThreads = this.getFetcher().getFetcherThreads();
						this.getFetcher().stopCrawler(fetcherThreads);
					}
				}
			}
		}
		
		nextLinks.addAllFromDocument(document);
		return nextLinks;
	}
	
	/** Seeds the crawl with the comma-separated base URLs from the target configuration. */
	@Override
	public void createCustomSeeds() {
		for(String base : target.getBaseUrl().split(",")){
			seeds.add(base);
		}
	}
	
	/** No file rules for seed collection - intentionally empty. */
	@Override
	public void createFileRule() {}

	public Target getTarget() {
		return target;
	}

	public void setTarget(Target target) {
		this.target = target;
	}

	
}
