package com.wgu.crawl.util;

import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;

/**
 * 爬虫工具类
 * @Author: w
 * @Date: 2019/6/25 16:58
 * @Version 1.0
 */
public final class CrawlUtil {

    /** Utility class — not instantiable. */
    private CrawlUtil() {
        throw new AssertionError("No CrawlUtil instances for you!");
    }

    /**
     * Fetches the raw content of a page via HTTP GET.
     *
     * @param url     the URL to crawl
     * @param charset the page's character encoding, e.g. "UTF-8"
     * @return the response body decoded with the given charset
     * @throws RuntimeException wrapping any I/O or protocol failure
     */
    public static String crawl(String url, String charset) {
        HttpGet httpGet = new HttpGet(url);
        // try-with-resources: both the client and the response are AutoCloseable;
        // the original code leaked them on every call.
        try (CloseableHttpClient httpClient = HttpClientBuilder.create().build();
             CloseableHttpResponse httpResponse = httpClient.execute(httpGet)) {
            return EntityUtils.toString(httpResponse.getEntity(), charset);
        } catch (Exception e) {
            // Preserve the cause so callers can see the underlying failure.
            throw new RuntimeException("Failed to crawl " + url, e);
        }
    }

    /**
     * Crawls a page and selects elements matching a CSS selector.
     *
     * @param url     the URL to crawl
     * @param charset the page's character encoding, e.g. "UTF-8"
     * @param role    the CSS selector (Jsoup syntax), e.g. ".tagdh_02 a"
     * @return the elements matching the selector
     */
    public static Elements analy(String url, String charset, String role) {
        String result = crawl(url, charset);
        Document document = Jsoup.parse(result);
        // Base URI lets Jsoup resolve relative links via absUrl().
        document.setBaseUri(url);
        return document.select(role);
    }

    /**
     * Parses already-crawled HTML and selects elements matching a CSS selector,
     * marking line breaks so they survive text extraction.
     *
     * @param url    base URI for resolving relative links
     * @param result the previously crawled HTML content
     * @param role   the CSS selector (Jsoup syntax)
     * @return the elements matching the selector
     */
    public static Elements analyByRole(String url, String result, String role) {
        Document document = Jsoup.parse(result);
        document.setBaseUri(url);
        // Disable pretty-printing so html() keeps the document's line structure.
        document.outputSettings(new Document.OutputSettings().prettyPrint(false));
        // NOTE(review): "\\n" appends the literal two-character text "\n" as a
        // line-break marker (the common Jsoup idiom — callers are expected to
        // replace it with a real newline after text extraction). Confirm callers
        // do that replacement; if a real newline was intended, use "\n" instead.
        document.select("br").append("\\n");
        document.select("p").prepend("\\n\\n");
        return document.select(role);
    }
}
