
/** 
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.tompai.datagather.example;

import com.tompai.datagather.model.CrawlDatum;
import com.tompai.datagather.model.CrawlDatums;
import com.tompai.datagather.model.Page;
import com.tompai.datagather.plugin.berkeley.BreadthCrawler;

/**
 * Demo: attaching crawl-depth information to pages.
 * <p>
 * This has nothing to do with depth-first traversal. Some crawl jobs want to
 * know each page's level in the traversal tree; the {@code meta} feature
 * introduced in 1.0 (per-datum metadata) makes this easy: every page carries a
 * "depth" value, and children of a page at depth x are enqueued at depth x+1.
 */
public class DemoDepthCrawler extends BreadthCrawler {

    /**
     * Creates the demo crawler and configures its seed and URL filters.
     *
     * @param crawlPath directory where the crawler persists its state
     * @param autoParse whether links should be extracted automatically
     */
    public DemoDepthCrawler(String crawlPath, boolean autoParse) {
        super(crawlPath, autoParse);

        // Seed the frontier with the start page, tagged as depth 1.
        // (Previously this addSeed call ran in a 1..5 loop with an unused
        // loop variable, adding the exact same URL+meta five times — the
        // duplicates were redundant, so a single call suffices.)
        addSeed(new CrawlDatum("http://roll.news.qq.com/")
                .meta("depth", 1));

        /* Regex rules only filter links the crawler extracts automatically.
           Links added manually — seeds, or links pushed into `next` inside
           visit() — bypass this regex filter. */
        addRegex("http://roll.news.qq.com/");
        /* Do not crawl image resources (jpg|png|gif). */
        addRegex("-.*\\.(jpg|png|gif).*");
        /* Do not crawl links containing "#". */
        addRegex("-.*#.*");

    }

    /** Prints each visited URL together with its recorded depth. */
    @Override
    public void visit(Page page, CrawlDatums next) {
        System.out.println("visiting:" + page.url() + "\tdepth=" + page.meta("depth"));
    }

    /**
     * Propagates depth: if the current page has depth x, every task parsed
     * from it is enqueued with depth x + 1.
     */
    @Override
    protected void afterParse(Page page, CrawlDatums next) {
        int depth = 1;
        try {
            depth = page.metaAsInt("depth");
        } catch (Exception ignored) {
            // The "depth" meta may be missing (e.g. a seed added without it)
            // or non-numeric; fall back to the default of 1 instead of
            // failing the whole parse.
        }
        depth++;
        next.meta("depth", depth);
    }


    /**
     * Runs the demo: at most 5 top-scoring pages per level, 3 levels deep.
     *
     * @param args unused
     * @throws Exception if the crawl fails to start
     */
    public static void main(String[] args) throws Exception {
        DemoDepthCrawler crawler = new DemoDepthCrawler("./depth_crawler", true);
        crawler.getConf().setTopN(5);
        crawler.start(3);
    }

}
