package com.passer.toolbox.common.crawler;

import lombok.extern.slf4j.Slf4j;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpStatus;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.utils.HttpClientUtils;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Optional;

/**
 * <p>网页爬虫</p>
 * <p>创建时间：2023/4/26</p>
 *
 * @author hj
 */
@Slf4j
public class WebpageCrawler {
    // 生成httpclient，相当于该打开一个浏览器
    private final CloseableHttpClient httpClient = HttpClients.createDefault();

    public String getHtmlCode(String baseUrl, Header[] headers) throws IOException {
        String html = "";

        //2.创建get请求，相当于在浏览器地址栏输入 网址
        HttpGet request = new HttpGet(baseUrl);
        request.setHeaders(headers);
        //3.执行get请求，相当于在输入地址栏后敲回车键
        CloseableHttpResponse response = this.httpClient.execute(request);

        //4.判断响应状态为200，进行处理
        if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) {
            //5.获取响应内容
            HttpEntity httpEntity = response.getEntity();
            html = EntityUtils.toString(httpEntity, "UTF-8");
        }
        //6.关闭
        HttpClientUtils.closeQuietly(response);
        return html;
    }


    public Optional<Document> getHtmlDocument(String baseUrl, Header[] headers) {
        try {
            return Optional.of(Jsoup.parse(getHtmlCode(baseUrl, headers)));
        } catch (IOException e) {
            log.error("{} 页面内容获取失败：{}", baseUrl, e.getMessage());
        }
        return Optional.empty();
    }

    public void close() {
        HttpClientUtils.closeQuietly(this.httpClient);
    }

}


