package org.lionsoul.websnail.downloader.http;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.impl.client.CloseableHttpClient;
import org.lionsoul.websnail.Spider;
import org.lionsoul.websnail.downloader.Downloader;
import org.lionsoul.websnail.downloader.HttpClientProcessor;
import org.lionsoul.websnail.downloader.Proxy;
import org.lionsoul.websnail.util.WebUtils;

import java.io.IOException;
import java.nio.charset.Charset;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * {@link Downloader} implementation backed by Apache HttpClient.
 *
 * <p>Fetches a URL with an HTTP GET, detects the page charset (from the
 * {@code Content-Type} header first, then from {@code <meta>} markup) and
 * returns the decoded HTML, or {@code null} on I/O failure.</p>
 */
public class HttpClientDownloader implements Downloader
{
    // Regex matching a whole <meta ...> tag; compiled once (static final) so the
    // pattern is not re-compiled on every page. Group 1 captures the attributes.
    private static final Pattern charsetMetaPattern = Pattern.compile("<meta([^>]*?)/?>", Pattern.CASE_INSENSITIVE);

    private final CloseableHttpClient httpClient;
    private HttpRequestFactory httpRequestFactory;
    private HttpClientProcessor processor;

    /**
     * Builds a downloader with an internally constructed HttpClient.
     *
     * @param proxy          optional HTTP proxy (may be null)
     * @param processor      optional hook invoked on the builder and each request (may be null)
     * @param requestFactory optional factory that fully owns request creation (may be null)
     */
    public HttpClientDownloader(Proxy proxy, HttpClientProcessor processor, HttpRequestFactory requestFactory)
    {
        // HttpClientBuilder here is the project's helper in this package (not imported).
        org.apache.http.impl.client.HttpClientBuilder httpClientBuilder = HttpClientBuilder.getDefaultHttpClientBuilder();
        if (proxy != null) {
            httpClientBuilder.setProxy(new HttpHost(proxy.getHost(), proxy.getPort(), proxy.getProtocol()));
        }

        if (processor != null) {
            // Let the processor customize the builder before the client is built.
            processor.initialize(httpClientBuilder);
            this.processor = processor;
        }
        this.httpClient = httpClientBuilder.build();
        this.httpRequestFactory = requestFactory;
    }

    public HttpClientDownloader(Proxy proxy)
    {
        this(proxy, null, null);
    }

    public HttpClientDownloader()
    {
        this(null, null, null);
    }

    /**
     * Builds a downloader from a caller-supplied {@link CloseableHttpClient}.
     * The downloader takes ownership: {@link #shutdown()} closes it.
     */
    public HttpClientDownloader(CloseableHttpClient client)
    {
        assert client != null;
        this.httpClient = client;
    }

    public HttpClientDownloader(CloseableHttpClient client, HttpRequestFactory httpRequestFactory)
    {
        assert client != null;
        this.httpClient = client;
        this.httpRequestFactory = httpRequestFactory;
    }

    public void setHttpRequestFactory(HttpRequestFactory httpRequestFactory)
    {
        this.httpRequestFactory = httpRequestFactory;
    }

    public void setProcessor(HttpClientProcessor processor)
    {
        this.processor = processor;
    }

    /**
     * Downloads {@code url} and returns the page HTML decoded with the
     * detected (or configured default) charset, or {@code null} on I/O error.
     */
    @Override
    public String process(Spider spider, String url)
    {
        final HttpGet request;
        if (httpRequestFactory == null) {
            request = new HttpGet(url);
            // NOTE(review): the "user-agent" header is set from getCharset(),
            // which looks like a bug — presumably this should be a user-agent
            // string from the spider options. Left unchanged pending
            // confirmation of the Options API.
            request.setHeader("user-agent", spider.getOptions().getCharset());
            // Apply the configured connect timeout to this request.
            RequestConfig requestConfig = RequestConfig.custom()
                    .setConnectTimeout(spider.getOptions().getConnectTimeout()).build();
            request.setConfig(requestConfig);

            // Give the processor a chance to tweak the request (headers, cookies, ...).
            if (processor != null) {
                processor.beforeProcess(request);
            }
        } else {
            // A request factory fully owns request creation; the processor hook
            // and default config are intentionally skipped in this branch.
            request = httpRequestFactory.createHttpGet(url);
        }

        HttpClientContext context = HttpClientContext.create();
        // try-with-resources: the original only closed the response on the
        // success path, leaking the connection when getPageContent threw.
        try (CloseableHttpResponse response = httpClient.execute(request, context)) {
            return getPageContent(spider, response);
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
    }


    /**
     * Reads the response entity and decodes it to a String.
     *
     * <p>Charset resolution order: response/meta detection, then the spider's
     * configured default, then the platform default as a last resort.</p>
     *
     * @throws IOException if the entity cannot be read or the charset name is unsupported
     */
    private String getPageContent(Spider spider, CloseableHttpResponse response)
            throws IllegalStateException, IOException
    {
        byte[] bytes = IOUtils.toByteArray(response.getEntity().getContent());
        String pageCharset = getPageCharset(response, bytes);

        // Fall back to the spider's configured default charset when detection failed.
        if (pageCharset == null && spider != null) {
            pageCharset = spider.getOptions().getCharset();
        }

        // Last-resort fallback: the original used a bare assert, which is a
        // no-op with assertions disabled (the JVM default) and then NPE'd in
        // the String constructor. Use the platform charset instead.
        if (pageCharset == null) {
            pageCharset = Charset.defaultCharset().name();
        }
        return new String(bytes, pageCharset);
    }

    /**
     * Detects the page charset, or returns {@code null} when none is found.
     *
     * <p>Checks the {@code Content-Type} entity header first, then scans the
     * HTML for charset-bearing meta tags, covering both
     * {@code <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>}
     * (HTML 4) and {@code <meta charset="UTF-8"/>} (HTML 5).</p>
     */
    private String getPageCharset(CloseableHttpResponse response, byte[] bytes)
    {
        String charset = null;

        //1. analysis the Content-Type header first
        Header contentTypeHeader = response.getEntity().getContentType();
        if (contentTypeHeader != null
                && (charset = WebUtils.getCharset(contentTypeHeader.getValue())) != null) {
            return charset;
        }

        //2. scan the meta markup for a charset declaration.
        // Decoding with the platform charset is safe enough here: ASCII-compatible
        // charsets dominate, and we only need the attribute text to be readable.
        String html = new String(bytes, Charset.defaultCharset());
        if (StringUtils.isBlank(html)) {
            return null;
        }

        Matcher matcher = charsetMetaPattern.matcher(html);
        while (matcher.find()) {
            String match = matcher.group(1);
            if (!match.toLowerCase().contains("charset")) continue;

            // Stop at the first meta tag that yields a parseable charset.
            charset = WebUtils.getCharset(match);
            if (charset != null) break;
        }
        return charset;
    }

    /** Closes the underlying HttpClient and releases its connections. */
    @Override
    public void shutdown()
    {
        try {
            httpClient.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
