package com.asiainfo.zqx;


import cn.edu.hfut.dmic.webcollector.model.CrawlDatum;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler;

import java.util.concurrent.atomic.AtomicInteger;

import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;


/**
 * Breadth-first crawler for NetEase (163.com) money/news articles.
 *
 * <p>Seeds are three JSONP "news flow" index feeds (payload shaped like
 * {@code data_callback([...])}). The JS branch of {@link #visit} unwraps the
 * JSONP envelope, parses the JSON array, and queues every article URL that
 * matches one of the known article path prefixes. The text branch extracts and
 * prints title, timestamp, author, and body from each article page.
 */
public class WangYIxinwen extends BreadthCrawler {
    // JSONP index feeds listing the latest money-news articles.
    private final String seedurl01 = "https://money.163.com/special/00259BVP/news_flow_index.js?callback=data_callback";
    private final String seedurl02 = "https://money.163.com/special/00259BVP/news_flow_index_02.js?callback=data_callback";
    private final String seedurl03 = "https://money.163.com/special/00259BVP/news_flow_index_03.js?callback=data_callback";
    // URL prefixes identifying the article types we keep.
    private final String dyregurl = "https://www.163.com/dy/article/";
    private final String monregurl = "https://www.163.com/money/article/";
    // Author-page prefix used to confirm a page is a self-media article.
    private final String wriregurl = "https://www.163.com/dy/media/";
    // Count of processed article pages. visit() runs on multiple crawler
    // threads (setThreads(3)), so a plain int ++ would be a data race.
    private final AtomicInteger num = new AtomicInteger();

    /**
     * Creates the crawler, registers the three JSONP seeds (typed "js" so
     * visit() can distinguish them from article pages), and sets 3 threads.
     *
     * @param crawlPath directory used by the BerkeleyDB-backed crawl state
     */
    public WangYIxinwen(String crawlPath) {
        super(crawlPath, false);
        addSeed(new CrawlDatum(seedurl01, "js"));
        addSeed(new CrawlDatum(seedurl02, "js"));
        addSeed(new CrawlDatum(seedurl03, "js"));
        setThreads(3);
        // setResumable(true);
    }

    /**
     * Dispatches each fetched page: "js" pages are JSONP index feeds whose
     * article links get queued; "text" pages are articles to print.
     *
     * @param page        the fetched page
     * @param crawlDatums sink for newly discovered URLs to crawl
     */
    @Override
    public void visit(Page page, CrawlDatums crawlDatums) {
        String contentType = page.contentType();
        if (contentType == null) {
            return;
        }
        if (page.matchType("text")) {
            visitArticle(page);
        } else if (page.matchType("js")) {
            visitIndexFeed(page, crawlDatums);
        }
    }

    /** Prints metadata and body of one article page, if it has an author link. */
    private void visitArticle(Page page) {
        // Guard against pages missing the author anchor: .first() on an empty
        // selection returns null and the original code NPE'd here.
        if (page.select("div.post_info>a").isEmpty()
                || !page.select("div.post_info>a").first().attr("abs:href").contains(wriregurl)) {
            return;
        }
        System.out.println("链接为" + page.url());
        // Elements.text() is null-safe (empty string when nothing matches),
        // unlike .first().text() which throws NPE on a missing title.
        System.out.println("标题为:" + page.select("div.post_main>h1.post_title").text());
        // The leading 19 chars of the info line are the timestamp
        // ("yyyy-MM-dd HH:mm:ss"); guard against shorter text.
        String info = page.select("div.post_info").text();
        System.out.println("时间为" + (info.length() >= 19 ? info.substring(0, 19) : info));
        System.out.println("来源为网易新闻");
        // eq(0) keeps only the first anchor without risking an NPE.
        System.out.println("作者为" + page.select("div.post_info>a").eq(0).text());
        System.out.println("正文为" + page.select("div.post_body").text());
        System.out.println("第" + num.incrementAndGet() + "个");
    }

    /** Unwraps the JSONP envelope and queues matching article URLs. */
    private void visitIndexFeed(Page page, CrawlDatums crawlDatums) {
        String body = page.html();
        int open = body.indexOf('(');
        // lastIndexOf: article titles/URLs inside the JSON may themselves
        // contain ')', so the first ')' would truncate the payload.
        int close = body.lastIndexOf(')');
        if (open < 0 || close <= open) {
            return; // not a data_callback(...) payload
        }
        String json = body.substring(open + 1, close);
        try {
            JSONArray items = new JSONArray(json);
            for (int i = 0; i < items.length(); i++) {
                JSONObject item = items.getJSONObject(i);
                // optString: one malformed entry should not abort the rest.
                String url = item.optString("docurl", "");
                if (url.contains(monregurl) || url.contains(dyregurl)) {
                    crawlDatums.add(new CrawlDatum(url, "text"));
                }
            }
        } catch (JSONException e) {
            System.err.println("Failed to parse news-flow JSON from " + page.url());
            e.printStackTrace();
        }
    }

    /** Entry point: crawls to depth 2 using the "wangyixinwen" state directory. */
    public static void main(String[] args) throws Exception {
        WangYIxinwen xin = new WangYIxinwen("wangyixinwen");
        xin.start(2);
    }
}
