package com.zdlog.reptilian.main;


import com.zdlog.ler.dao.LerWebsiteDao;
import com.zdlog.ler.entity.LerWebsite;
import com.zdlog.reptilian.link.Links;
import com.zdlog.reptilian.page.Page;
import com.zdlog.reptilian.page.PageParserTool;
import com.zdlog.reptilian.page.RequestAndResponseTool;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.beans.factory.annotation.Autowired;

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.UUID;

import static com.zdlog.reptilian.page.PageParserTool.select;

public class MyCrawler {

    /**
     * DAO used to persist scraped website records; injected by Spring.
     * NOTE(review): when this class is instantiated directly via {@code new}
     * (as {@link #main} does), injection never happens and this field stays
     * {@code null} — {@link #aa} guards against that before saving.
     */
    @Autowired
    private LerWebsiteDao lerWebsiteDao;

    /**
     * Seeds the queue of unvisited URLs.
     *
     * @param seeds initial seed URLs to enqueue
     */
    private void initCrawlerWithSeeds(String[] seeds) {
        for (String seed : seeds) {
            Links.addUnvisitedUrlQueue(seed);
        }
    }

    /**
     * Main crawl loop: seeds the URL queue, then repeatedly takes the next
     * unvisited URL, downloads its page, narrows the DOM to the ".YaHei"
     * region and extracts/persists the website entries found under
     * ".ContTit". Stops when the queue is empty or the visited count
     * exceeds 1000.
     *
     * @param seeds initial URLs to enqueue before crawling
     */
    public void crawling(String[] seeds) {

        // Initialize the unvisited-URL queue with the seeds.
        initCrawlerWithSeeds(seeds);

        // Loop while there are pending links and at most 1000 pages visited.
        while (!Links.unVisitedUrlQueueIsEmpty() && Links.getVisitedUrlNum() <= 1000) {

            // Take the next URL from the head of the unvisited queue.
            String visitUrl = (String) Links.removeHeadOfUnVisitedUrlQueue();
            if (visitUrl == null) {
                continue;
            }

            // Download the page for this URL.
            Page page = RequestAndResponseTool.sendRequstAndGetResponse(visitUrl);

            // Narrow the DOM to the ".YaHei" region, then parse the ranked
            // entries under ".ContTit" out of that fragment.
            Elements es = select(page, ".YaHei");
            Page fragmentPage = new Page(null, null, null);
            fragmentPage.setHtml(es.html());
            aa(fragmentPage, ".ContTit");
        }
    }

    /**
     * Ad-hoc entry point.
     * NOTE(review): {@code new MyCrawler()} bypasses Spring, so
     * {@code lerWebsiteDao} is not injected here; extracted entries are
     * printed but not persisted (see the null guard in {@link #aa}).
     */
    public static void main(String[] args) {
        MyCrawler crawler = new MyCrawler();
        crawler.crawling(new String[]{"http://search.top.chinaz.com/top.aspx?p=1&t=all"});
    }

    /**
     * Extracts ranked-website entries from the elements matching
     * {@code cssSelector}: for each entry, reads the ranking (".fz16"),
     * the site link (from the ".PCop a" href), the Alexa ranking (".w120")
     * and the site name, prints them, and persists a {@link LerWebsite}
     * record when the DAO is available.
     *
     * @param page        page whose HTML is searched
     * @param cssSelector selector identifying one element per entry
     * @return the set of extracted site links (BUG FIX: this set was
     *         previously never populated and always returned empty)
     */
    public Set<String> aa(Page page, String cssSelector) {
        Set<String> links = new HashSet<String>();
        Elements es = select(page, cssSelector);
        for (Element element : es) {
            String ranking = element.select(".fz16").text();
            String href = element.select(".PCop a").attr("href");
            // Robustness: skip entries whose href does not contain "site_"
            // instead of throwing ArrayIndexOutOfBoundsException and killing
            // the whole crawl.
            String[] hrefParts = href.split("site_");
            if (hrefParts.length < 2) {
                continue;
            }
            // BUG FIX: String.split takes a regex, so the previous ".html"
            // matched ANY character followed by "html"; escape the dot to
            // split on the literal ".html" suffix only.
            String link = hrefParts[1].split("\\.html")[0];
            String alexaRanking = element.select(".w120").text().split(" ")[0];
            String name = element.select(".PCop a").text();
            System.out.println("排名："+ranking+"     地址："+link+"           alexa排名："+alexaRanking+"         名称："+name);
            links.add(link); // BUG FIX: populate the returned set
            try {
                LerWebsite lerWebsite = new LerWebsite();
                lerWebsite.setWebsiteId(UUID.randomUUID().toString());
                lerWebsite.setAlexa_ranking(Integer.parseInt(alexaRanking));
                lerWebsite.setLink(link);
                lerWebsite.setName(name);
                lerWebsite.setRanking(Integer.parseInt(ranking));
                // Guard: the DAO is null when this class is constructed
                // outside a Spring context (e.g. from main); skip persistence
                // rather than throwing NullPointerException.
                if (lerWebsiteDao != null) {
                    lerWebsiteDao.save(lerWebsite);
                }
            } catch (NumberFormatException e) {
                // Malformed ranking text scraped from the page — skip
                // persisting this entry but keep processing the rest.
                System.out.println("Skipping persist for " + link + ": " + e);
            }
        }
        return links;
    }

}
