package com.finstone.data.spider.eshop;

import com.alibaba.fastjson.JSON;
import com.finstone.data.spider.eshop.service.ISpiderService;
import com.finstone.data.spider.eshop.service.impl.WeiBoPageService;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Attribute;
import org.jsoup.nodes.Attributes;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.safety.Whitelist;
import org.jsoup.select.Elements;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.pipeline.JsonFilePipeline;
import us.codecraft.webmagic.processor.PageProcessor;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * WebMagic {@link PageProcessor} that crawls s.weibo.com search-result pages for the
 * "#微摄影#" hashtag and prints each post author's display name and user id to stdout.
 *
 * <p>NOTE(review): despite its name this processor targets Weibo, not GitHub; the class
 * name is kept unchanged so existing callers (and {@code main}) keep working.
 *
 * Created by Sean on 2016/3/22.
 */
public class GithubRepoPageProcessor implements PageProcessor {

    /** Search URL; {@code %2523...%2523} is the double-percent-escaped "#微摄影#" hashtag. */
    private static final String SEARCH_URL =
            "http://s.weibo.com/weibo/%2523%25E5%25BE%25AE%25E6%2591%2584%25E5%25BD%25B1%2523";

    // Part 1: crawl configuration — retry count, delay between requests, and charset.
    private Site site = Site.me().setRetryTimes(3).setSleepTime(2000).setCharset("utf-8");

    // Matches the inline <script> blocks in which Weibo ships page fragments as JSON.
    // Compiled once and shared: Pattern is immutable and thread-safe.
    private static final Pattern BLOCK_PATTERN = Pattern.compile(
            "<script>STK\\s&&\\sSTK\\.pageletM\\s&&\\sSTK\\.pageletM\\.view\\(.*\\)");

    @Override
    // process() is the core extraction hook, invoked by WebMagic for every fetched page.
    public void process(Page page) {
        // Queue result pages 2..41 of the same search. These are locals, not fields:
        // WebMagic may call process() from several threads, so per-call state must not
        // live on the (shared) processor instance.
        List<String> requests = new ArrayList<String>();
        for (int i = 2; i < 42; i++) {
            requests.add(SEARCH_URL + "&page=" + i);
        }
        page.addTargetRequests(requests);

        // Part 2: locate the embedded "search_feed" HTML fragment inside the script blocks.
        String source = page.getHtml().toString();
        Document document = null;
        Matcher m = BLOCK_PATTERN.matcher(source);
        while (m.find()) {
            String jsonStr = m.group();
            int braceIdx = jsonStr.indexOf('{');
            int parenIdx = jsonStr.lastIndexOf(')');
            if (braceIdx < 0 || parenIdx <= braceIdx) {
                continue; // malformed block — skip instead of throwing StringIndexOutOfBounds
            }
            jsonStr = jsonStr.substring(braceIdx, parenIdx);
            // Normalize double-escaped unicode sequences, then decode them back to text.
            jsonStr = UnicodeConverter.toEncodedUnicode(jsonStr, true).replace("\\\\", "\\");
            String str = UnicodeConverter.fromEncodedUnicode(jsonStr.toCharArray(), 0, jsonStr.length());
            int htmlIdx = str.indexOf("html\":\"");
            if (htmlIdx < 0) {
                continue; // this pagelet carries no "html" payload
            }
            String htmlString = str.substring(htmlIdx + 7, str.length() - 2);
            if (htmlString.trim().startsWith("<div class=\"search_feed\">")) {
                document = Jsoup.parse(htmlString);
            }
        }
        // Guard against NPE: no feed fragment on this page (regex miss, anti-crawler
        // interstitial, or a layout change on Weibo's side).
        if (document == null) {
            System.out.println("No more urls to fetch with current keyword.");
            return;
        }
        List<Element> elements = document.getElementsByAttributeValue("class", "WB_feed_detail clearfix");
        if (elements.isEmpty()) {
            System.out.println("No more urls to fetch with current keyword.");
            return;
        }
        for (Element elem : elements) {
            Element subEle = elem.getElementsByAttributeValue("class", "name_txt W_fb").first();
            if (subEle == null) {
                continue;
            }
            String nick_name = subEle.attr("nick-name");
            // The user id arrives as a quirky attribute key shaped like "<uid>?refer_flag...";
            // cut at the marker's position (the old fixed-length trim was wrong whenever the
            // key continued past "?refer_flag", e.g. "?refer_flag=...").
            String usercard = "";
            for (Attribute attribute : subEle.attributes()) {
                String key = attribute.getKey();
                int flagIdx = key.indexOf("?refer_flag");
                if (flagIdx >= 0) {
                    usercard = key.substring(0, flagIdx);
                }
            }
            System.out.println("用户名： " + nick_name + "\t\t 用户id：  " + usercard);
        }
    }

    @Override
    public Site getSite() {
        return site;
    }

    public static void main(String[] args) {
        Spider.create(new GithubRepoPageProcessor())
                // Entry point: first search-result page for the hashtag.
                .addUrl(SEARCH_URL + "&Refer=index")
                // Single crawl thread (the old comment claimed 5 — it was wrong).
                .thread(1)
                // Start crawling.
                .run();
    }
}