package spider.web;

import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Predicate;

/**
 * Breadth-first (width-first) web crawler main class.
 *
 * @author zhangzeli
 * @since 2018-05-05
 */
public class MyCrawler {

    /** Maximum number of pages to visit before the crawl stops. */
    private static final int MAX_PAGES = 1000;

    /** Site prefix that links must match to be followed (and the seed URL). */
    private static final String URL_PREFIX = "http://wh.58.com";

    /**
     * Seeds the queue of unvisited URLs.
     *
     * @param seeds initial URLs to start crawling from
     */
    private void initCrawlerWithSeeds(String[] seeds) {
        for (String seed : seeds) {
            LinkQueue.addUnvisItedUrl(seed);
        }
    }

    /**
     * Runs the breadth-first crawl: repeatedly dequeues an unvisited URL,
     * downloads the page, records the URL as visited, and enqueues any
     * in-scope links found on the page. Stops when the unvisited queue is
     * empty or {@value #MAX_PAGES} pages have been visited.
     *
     * @param seeds initial URLs to start crawling from
     */
    public void crawling(String[] seeds) {
        // Follow only links that stay on the target site.
        Predicate<String> linkFilter = s -> s.startsWith(URL_PREFIX);
        // Seed the unvisited queue.
        initCrawlerWithSeeds(seeds);
        // Loop while URLs remain and the visit budget is not exhausted.
        while (!LinkQueue.unVisitedUrlEmpty() && LinkQueue.getVisitedUrlNum() < MAX_PAGES) {
            // Dequeue the next URL to fetch.
            String visitUrl = (String) LinkQueue.unVisitedUrlDequeue();
            if (visitUrl == null || visitUrl.isEmpty()) {
                continue;
            }
            DownLoadFile downLoadFile = new DownLoadFile();
            downLoadFile.downloadFile(visitUrl);
            // Mark as visited so it is counted toward MAX_PAGES and not re-crawled.
            LinkQueue.addVisitedUrl(visitUrl);
            // Extract in-scope links from the downloaded page.
            Set<String> links = HtmlParserTool.extracLinks(visitUrl, linkFilter);
            // Enqueue newly discovered links for later visits.
            for (String link : links) {
                LinkQueue.addUnvisItedUrl(link);
            }
        }
    }

    /** Entry point: starts the crawl from the site root. */
    public static void main(String[] args) {
        MyCrawler spider = new MyCrawler();
        spider.crawling(new String[]{URL_PREFIX});
    }
}
