package com.virjar.vscrawler.web.crawler.repository;

import com.alibaba.fastjson.JSONException;
import com.alibaba.fastjson.JSONObject;
import com.google.common.collect.Lists;
import com.virjar.sipsoup.parse.XpathParser;
import com.virjar.vscrawler.core.VSCrawler;
import com.virjar.vscrawler.core.VSCrawlerBuilder;
import com.virjar.vscrawler.core.net.session.CrawlerSession;
import com.virjar.vscrawler.core.processor.GrabResult;
import com.virjar.vscrawler.core.processor.SeedProcessor;
import com.virjar.vscrawler.core.seed.Seed;
import com.virjar.vscrawler.web.api.CrawlerBuilder;
import org.apache.commons.lang3.StringUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Created by virjar on 2018/5/13.<br>
 * Sample crawler builder that scrapes image gallery pages from meitulu.com.
 */
public class BeautyGrab implements CrawlerBuilder {

    /** XPath extracting all gallery image URLs from a page. */
    private static final String IMAGE_XPATH = "/css('.content')::center/img/@src";

    /** XPath extracting the absolute URL of the "next page" (下一页) pagination link. */
    private static final String NEXT_PAGE_XPATH =
            "/css('#pages a')::self()[contains(text(),'下一页')]/absUrl('href')";

    /**
     * Builds a crawler named {@code beautyCrawler} whose processor fetches a
     * meitulu.com gallery item, extracts all image URLs on the first page, and
     * follows the pagination chain collecting images from every subsequent page.
     *
     * @return a configured, not-yet-started {@link VSCrawler} instance
     */
    @Override
    public VSCrawler build() {
        return VSCrawlerBuilder.create().setCrawlerName("beautyCrawler")
                .setProcessor(new SeedProcessor() {

                    /**
                     * Follows the "next page" chain starting at {@code firstUrl} and
                     * collects every image URL found along the way.
                     * <p>
                     * Iterative rather than recursive so an arbitrarily long pagination
                     * chain cannot overflow the stack; the visited set guards against
                     * pagination cycles that would otherwise loop forever.
                     *
                     * @param firstUrl       absolute URL of the next page to fetch; may be blank
                     * @param crawlerSession session supplying the HTTP client
                     * @return image URLs from all followed pages, possibly empty, never null
                     */
                    private List<String> followPages(String firstUrl, CrawlerSession crawlerSession) {
                        List<String> ret = new ArrayList<>();
                        Set<String> visited = new HashSet<>();
                        String url = firstUrl;
                        // visited.add(url) is false on a repeat URL, terminating the loop.
                        while (StringUtils.isNotBlank(url) && visited.add(url)) {
                            String html = crawlerSession.getCrawlerHttpClient().get(url);
                            if (StringUtils.isBlank(html)) {
                                break;
                            }
                            // Base URI = the fetched URL, so absUrl('href') resolves correctly.
                            Document document = Jsoup.parse(html, url);
                            ret.addAll(Lists.newArrayList(
                                    XpathParser.compileNoError(IMAGE_XPATH).evaluateToString(document)));
                            url = XpathParser.compileNoError(NEXT_PAGE_XPATH).evaluateToSingleStr(document);
                        }
                        return ret;
                    }

                    @Override
                    public void process(final Seed seed, CrawlerSession crawlerSession, GrabResult grabResult) {
                        // Seed data is either a JSON object {"param": "<id>"} or the bare id string.
                        String param;
                        try {
                            JSONObject jsonObject = JSONObject.parseObject(seed.getData());
                            param = jsonObject.getString("param");
                        } catch (JSONException e) {
                            // Not JSON — treat the raw seed data as the gallery id.
                            param = seed.getData();
                        }
                        String referUrl = "https://www.meitulu.com/item/" + param + ".html";
                        String html = crawlerSession.getCrawlerHttpClient().get(referUrl);
                        if (StringUtils.isBlank(html)) {
                            return;
                        }

                        // BUGFIX: base URI must be referUrl — the URL this HTML came from.
                        // Previously seed.getData() (JSON text or a bare id) was used, which
                        // broke absUrl('href') resolution for the next-page link below.
                        Document document = Jsoup.parse(html, referUrl);
                        grabResult.addResults(Lists.newArrayList(
                                XpathParser.compileNoError(IMAGE_XPATH).evaluateToString(document)));
                        String nextUrl = XpathParser.compileNoError(NEXT_PAGE_XPATH)
                                .evaluateToSingleStr(document);
                        grabResult.addResults(followPages(nextUrl, crawlerSession));
                    }
                }).build();
    }

    /**
     * Smoke-test entry point: synchronously grabs gallery item 2125 and prints
     * the collected results as JSON. Requires network access.
     */
    public static void main(String[] args) {
        VSCrawler vsCrawler = new BeautyGrab().build();
        JSONObject jsonObject = new JSONObject();
        jsonObject.put("crawlerName", "beautyCrawler");
        jsonObject.put("param", "2125");
        GrabResult grabResult = vsCrawler.grabSync(jsonObject.toJSONString());
        System.out.println(JSONObject.toJSONString(grabResult.allEntityResult()));
    }
}
