package com.jt.blog.crawler;

import com.jt.blog.model.Blog;
import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.url.WebURL;
import org.apache.tika.parser.html.HtmlParser;
import org.htmlparser.Node;
import org.htmlparser.NodeFilter;
import org.htmlparser.Parser;
import org.htmlparser.filters.CssSelectorNodeFilter;
import org.htmlparser.util.NodeIterator;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;

import java.util.Set;
import java.util.regex.Pattern;

/**
 * Web crawler that extracts blog posts (title, abstract, content, tags)
 * from https://my.oschina.net/taoluoluo.
 * @author TIM
 * @create 2016-08-30 10
 **/
public class MyCrawler extends WebCrawler {

    private final static Pattern FILTERS = Pattern.compile(".*(\\.(css|js|gif|jpg"
            + "|png|mp3|mp3|zip|gz))$");

    private final static Pattern BLOG_REG = Pattern.compile("https://my\\.oschina\\.net/taoluoluo/blog/[0-9]+");

    @Override
    public boolean shouldVisit(Page referringPage, WebURL url) {
        String href = url.getURL().toLowerCase();
        return BLOG_REG.matcher(href).matches()
                && href.startsWith("https://my.oschina.net/");
    }


    @Override
    public void visit(Page page) {
        String url = page.getWebURL().getURL().toLowerCase();

        if (page.getParseData() instanceof HtmlParseData) {
            HtmlParseData htmlParseData = (HtmlParseData) page.getParseData();
            String html = htmlParseData.getHtml();
            if (BLOG_REG.matcher(url).matches()){
                try {
                    Parser parser = new Parser(html);
                    NodeFilter titleFilter = new CssSelectorNodeFilter("div[class='title']>span");
                    NodeList titleList = parser.extractAllNodesThatMatch(titleFilter);
                    Blog blog = new Blog();
                    if(titleList!=null&&titleList.size()>0){
                        Node titleNode = titleList.elementAt(0).getNextSibling();
                        String title = titleNode.toPlainTextString().replaceAll("\n","").trim();
                        blog.setTitle(title);
                    }
                    parser = new Parser(html);
                    NodeFilter abstractFilter = new CssSelectorNodeFilter("div[class='blog-abstract']");
                    NodeList abstractList = parser.extractAllNodesThatMatch(abstractFilter);
                    if(abstractList!=null&&abstractList.size()>0){
                        String subject = abstractList.elementAt(0).toPlainTextString().replaceAll("\n","").trim();
                        blog.setSubject(subject);
                    }
                    parser = new Parser(html);
                    NodeFilter contentFilter = new CssSelectorNodeFilter("div[class='BlogContent']");
                    NodeList contentList = parser.extractAllNodesThatMatch(contentFilter);
                    if(contentList!=null&&contentList.size()>0){
                        String content = contentList.elementAt(0).getChildren().toHtml().trim();
                        blog.setContent(content);
                    }
                    parser = new Parser(html);
                    NodeFilter tagFilter = new CssSelectorNodeFilter("span[class='tag']>a");
                    NodeList tagList = parser.extractAllNodesThatMatch(tagFilter);
                    if(tagList!=null&&tagList.size()>0){
                        String tags = "";
                        for (int i=0 ;i<tagList.size();i++){
                            tags += tagList.elementAt(i).toPlainTextString().replaceAll("\n","").trim()+",";
                        }
                        tags = tags.substring(0,tags.length()-1);
                        blog.setKeywords(tags);
                    }
                } catch (ParserException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
