package org.viki.ljspider;

import java.util.Locale;
import java.util.Set;
import java.util.regex.Pattern;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.url.WebURL;

public class HtmlHelper extends WebCrawler{
	/**
     * 正则匹配指定的后缀文件
     */
    private final static Pattern FILTERS = Pattern.compile(".*(\\.(css|js|gif|jpe?g"
                                                           + "|png|tiff?|mid|mp2|mp3|mp4"
                                                           + "|zip|gz|wav|avi|mov|mpeg|ram"
                                                           + "|m4v|pdf|rm|smil|wmv|swf|wma"
                                                           + "|rar))$");
	public boolean shouldVisit(WebURL url){
		String href = url.getURL().toLowerCase();  // 得到小写的url
        return !FILTERS.matcher(href).matches()   // 正则匹配，过滤掉我们不需要的后缀文件
               && href.startsWith("https://bj.lianjia.com/chengjiao/");
		
	}
	public void visit(Page page){
		String url = page.getWebURL().getURL();  // 获取url
        System.out.println("URL: " + url);

        if (page.getParseData() instanceof HtmlParseData) {  // 判断是否是html数据
            HtmlParseData htmlParseData = (HtmlParseData) page.getParseData(); // 强制类型转换，获取html数据对象
            String text = htmlParseData.getText();  // 获取页面纯文本（无html标签）
            String html = htmlParseData.getHtml();  // 获取页面Html
            Document doc = Jsoup.parse(html);
//            String s = doc.select("ul.listContent").text();
//            System.out.println("house:" + house);
            Elements contents = doc.select("ul[class=listContent]"); 
            for(final Element c: contents){
            	
            	final String link = c.select("a[class=img]").attr("href");//链接
            	final String title = c.select("div[class=title]").first().text();//title
            	final String houseInfo = c.select("div[class=houseInfo]").first().text();//houseInfo
            	final String dealDate = c.select("div[class=dealDate]").first().text();//dealDate
            	final String totalPrice = c.select("div[class=totalPrice]").first().text();//totalPrice
            	final String unitPrice = c.select("div[class=unitPrice]").first().text();//unitPrice
            	final String tag = c.select("span[class=dealHouseTxt]").first().text();//tag
            	
            	System.out.println("link:" + link + " title:" + title + " houseInfo:" + houseInfo
            			 + " dealDate:" + dealDate + " totalPrice:" + totalPrice + " unitPrice:" + unitPrice
            			 + " tag:" + tag);
            	
            }
            
            Set<WebURL> links = htmlParseData.getOutgoingUrls();  // 获取页面输出链接

//            System.out.println("纯文本长度: " + text.length());
//            System.out.println("html长度: " + html.length());
//            System.out.println("输出链接个数: " + links.size());
        }
	}
	
}
