package com.attilax.spider;

import java.util.Map;
import java.util.function.Function;

import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import com.attilax.io.filex;
import com.attilax.net.urlUtil;
import com.google.common.collect.Maps;

/**
 * Crawling news from hfut news
 *
 * @author hu
 */
/**
 * Breadth-first crawler that fetches pages, reports each page's title and raw
 * HTML to {@link #clawFinishHandler}, and enqueues every anchor link whose
 * text passes {@link #titleChker}.
 *
 * <p>Usage: construct, assign {@code seedUrl} (or call {@link #setSeedUrl}),
 * assign the two callbacks, call {@link #ini()}, then start the crawl.
 *
 * @author hu
 */
public class WebCollectorSpiderRssAbs extends BreadthCrawler {

	/** Root URL: used to seed the crawl and to resolve relative/absolute-path links. */
	String seedUrl;

	/**
	 * Validates a candidate link's anchor text. Implementations signal
	 * rejection by throwing {@link TitNotOk}; the return value is ignored.
	 * May be left {@code null} to accept every link.
	 */
	public Function<String, Boolean> titleChker;

	/**
	 * Invoked once per fetched page with a map holding keys {@code "title"}
	 * and {@code "content"} (raw HTML). Raw {@code Map} is kept deliberately
	 * so existing callers assigning {@code Function<Map, Boolean>} lambdas
	 * still compile. May be left {@code null}.
	 */
	public Function<Map, Boolean> clawFinishHandler;

	/**
	 * @param crawlPath
	 *            path of the directory which maintains information (fetch
	 *            history, frontier) of this crawler
	 * @param autoParse
	 *            if true, BreadthCrawler will auto-extract links that match
	 *            regex rules from each page
	 */
	public WebCollectorSpiderRssAbs(String crawlPath, boolean autoParse) {
		super(crawlPath, autoParse);
	}

	/**
	 * Sets the seed URL used by {@link #ini()} and for link resolution.
	 * Added as a backward-compatible alternative to direct field assignment.
	 */
	public void setSeedUrl(String seedUrl) {
		this.seedUrl = seedUrl;
	}

	/**
	 * Registers {@link #seedUrl} as the crawl's start page.
	 *
	 * @throws IllegalStateException
	 *             if {@code seedUrl} has not been set (previously this would
	 *             have seeded a null URL)
	 */
	public void ini() {
		if (seedUrl == null || seedUrl.isEmpty()) {
			throw new IllegalStateException("seedUrl must be set before calling ini()");
		}
		this.addSeed(seedUrl);
	}

	/**
	 * Called by the crawler for every fetched page: publishes the page's
	 * title/HTML to {@link #clawFinishHandler}, then walks all anchors,
	 * normalizes each href against {@link #seedUrl}, filters by anchor text
	 * via {@link #titleChker}, and adds survivors to {@code next}.
	 * All exceptions are logged and swallowed so one bad page does not stop
	 * the crawl.
	 */
	@Override
	public void visit(Page page, CrawlDatums next) {
		try {
			String url = page.getUrl();
			// jsoup document for the fetched page
			Document doc = page.getDoc();

			String title = doc.title();
			String content = doc.html();

			System.out.println("-----------URL:" + url);
			System.out.println("-----------title:" + title);

			Map m = Maps.newConcurrentMap();
			m.put("title", title);
			m.put("content", content);
			if (clawFinishHandler != null) {
				clawFinishHandler.apply(m);
			}

			/*
			 * Enqueue candidate links. WebCollector automatically filters
			 * links that have been fetched before; if autoParse is true,
			 * links not matching the regex rules are also filtered.
			 */
			Elements anchors = doc.getElementsByTag("a");
			for (Element anchor : anchors) {
				String href = anchor.attr("href");

				if (urlUtil.isFullUrl(href)) {
					// already a complete URL — use as-is
				} else if (urlUtil.isAbsUrl(href)) {
					// absolute path: prefix with the seed's scheme+host
					href = urlUtil.getUrlHead(seedUrl) + href;
				} else if (urlUtil.isRltUrl(href)) {
					// relative path: resolve against the seed URL
					href = addLastSplash(seedUrl) + href;
				}

				String linkTitle = anchor.text();
				System.out.println(linkTitle);
				if (titleChker != null) {
					try {
						// BUG FIX: the original validated the page title
						// here instead of the link's own text, so every
						// link was judged by the same (page) title.
						titleChker.apply(linkTitle);
					} catch (TitNotOk rejected) {
						System.out.println("--tie too shortL:" + linkTitle);
						continue;
					}
				}
				next.add(href);
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/**
	 * Returns {@code url} guaranteed to end with a single trailing slash,
	 * appending one only when missing.
	 *
	 * @param seedUrl2 URL to normalize; must be non-null
	 * @return the URL ending in "/"
	 */
	public static String addLastSplash(String seedUrl2) {
		if (seedUrl2.endsWith("/")) {
			return seedUrl2;
		}
		return seedUrl2 + "/";
	}

	/** Ad-hoc smoke test: constructs a crawler and exercises urlUtil.getUrlHead. */
	public static void main(String[] args) throws Exception {
		WebCollectorSpiderRssAbs crawler = new WebCollectorSpiderRssAbs("crawlQc25", true);
		crawler.setThreads(10);
		crawler.setTopN(50);
		// crawler.setResumable(true);

		String string = "https://zhidao.baidu.com/daily";
		System.out.println(urlUtil.getUrlHead(string));
		System.out.println("--f");
	}

}