package com.spider.core;

import com.spider.exception.ProcessLogicNotFoundException;
import com.spider.page.PageFactory;
import com.spider.page.ResultItem;
import com.spider.page.ResultPage;
import com.spider.pool.ThreadPool;
import com.spider.pool.UriQueueScheduler;
import com.spider.strategy.ResponseStrategy;
import org.apache.http.Header;
import org.apache.http.HttpStatus;
import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.config.Registry;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.conn.socket.LayeredConnectionSocketFactory;
import org.apache.http.conn.socket.PlainConnectionSocketFactory;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
import org.apache.http.conn.ssl.TrustStrategy;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.client.LaxRedirectStrategy;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeader;
import org.apache.http.ssl.SSLContextBuilder;
import org.apache.http.ssl.SSLContexts;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.net.ssl.SSLContext;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.KeyManagementException;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.List;

/**
 * @author β世界
 * Created on 16:52
 * @Description Spider client: the entry point for running a crawl.
 * Build a {@code SpiderClient}, register seed URLs and {@link ProcessLogic}
 * handlers, then call {@link #start()} to begin crawling.
 */
@SuppressWarnings({"unused"})
public class SpiderClient{
    private final static String SET_COOKIE = "set-cookie";
    private final static String COOKIE = "cookie";
    private final static Logger logger = LoggerFactory.getLogger(SpiderClient.class);
    /** Overall spider configuration. **/
    private final SpiderConfig spiderConfig;
    /** Request-level configuration applied to every outgoing request. **/
    private final RequestConfig requestconfig;
    /** Pooling connection manager shared by all worker threads. **/
    private PoolingHttpClientConnectionManager poolConnManage;
    /** Thread-safe HTTP client, shared by every worker thread. **/
    private CloseableHttpClient client = null;
    /** URL scheduler holding untreated and already-processed URLs. **/
    private final UriQueueScheduler uriQueueScheduler = new UriQueueScheduler();
    /** Worker thread pool. **/
    private ThreadPool executor = null;
    /** Seed URLs: crawled first to discover child links. **/
    private final ArrayList<URI> seedUrl = new ArrayList<>();
    /** User-defined processing logic, matched per result page. **/
    private final List<ProcessLogic> processLogicList = new ArrayList<>();
    /** Factory that wraps responses into result pages. **/
    private PageFactory pageFactory;
    /** Shared storage space for crawl results. **/
    private final ResultItem resultItem = new ResultItem();
    /** Sleep time while waiting for worker threads to finish. **/
    private static final long SLEEP_TIME = 5000L;
    /** Upper bound on manually-followed 3xx redirects; guards against redirect loops. **/
    private static final int MAX_REDIRECTS = 20;

    /**
     * @author β世界 on 22:47 2021/5/6
     * @Description Initializes the pooling connection manager, registering
     * socket factories for HTTP and (when enabled) HTTPS.
     **/
    private void initConnectionPool() {
        try {
            RegistryBuilder<ConnectionSocketFactory> registryBuilder = RegistryBuilder.create();
            if(spiderConfig.isHttps()){
                // Trust self-signed certificates so the crawler can reach
                // hosts without a CA-issued certificate.
                // NOTE(review): the original code registered "https" twice;
                // the first registration (trust-all context) was dead code,
                // silently overridden by this one, and has been removed.
                SSLContextBuilder builder = new SSLContextBuilder();
                builder.loadTrustMaterial(null, new TrustSelfSignedStrategy());
                SSLConnectionSocketFactory sslConnectionSocketFactory = new SSLConnectionSocketFactory(builder.build());
                registryBuilder.register("https", sslConnectionSocketFactory);
            }
            registryBuilder.register("http", PlainConnectionSocketFactory.getSocketFactory());

            // Build the socket-factory registry and the pooling manager.
            Registry<ConnectionSocketFactory> socketFactoryRegistry = registryBuilder.build();
            poolConnManage = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
            // Maximum number of simultaneous connections overall.
            poolConnManage.setMaxTotal(Integer.MAX_VALUE);
            // Maximum number of simultaneous connections per route (per host).
            poolConnManage.setDefaultMaxPerRoute(Integer.MAX_VALUE);

        } catch (NoSuchAlgorithmException | KeyManagementException | KeyStoreException e) {
            logger.warn("Init HttpClient error...", e);
        }
    }

    /***
     * @author β世界 on 16:57 2020/1/28
     * @Description Creates the spider client: initializes the connection pool,
     * the HTTP client, the page factory and the worker thread pool.
     * @param spiderConfig: overall spider settings
     * @param requestConfig: per-request settings
     **/
    public SpiderClient(SpiderConfig spiderConfig,RequestConfig requestConfig){
        this.spiderConfig = spiderConfig;
        this.requestconfig = requestConfig;
        logger.info("Init SpiderClient...");
        initConnectionPool();
        initClient();
        initPageFactory();
        initThreadPool(spiderConfig);
    }

    /**
     * @author β世界 on 15:10 2021/3/17
     * @Description Returns the current spider configuration.
     * @return com.spider.core.SpiderConfig
     **/
    public SpiderConfig getSpiderConfig() {
        return spiderConfig;
    }

    /**
     * Registers a user-defined processing logic; the first logic whose
     * {@code isProcess} returns true handles a given result page.
     **/
    public SpiderClient addProcessLogic(ProcessLogic processLogic){
        this.processLogicList.add(processLogic);
        return this;
    }

    /**
     * Adds seed URLs (crawled first to discover child links).
     * Malformed URLs are logged and skipped instead of aborting the batch.
     * @author β世界 on 7:45 2020/6/29
     **/
    public SpiderClient addSeedUrl(String... urls) {
        for (String url : urls) {
            try {
                seedUrl.add(new URI(url));
                logger.info("Add Seed URL:" + url);
            } catch (URISyntaxException e) {
                logger.warn("Invalid seed URL skipped: " + url, e);
            }
        }
        return this;
    }

    /**
     * @author β世界 on 9:27 2020/12/9
     * @Description Appends cookie values to an existing cookie header value.
     * @param oldCookieValue: the current cookie header value ("" if none)
     * @param newCookie: the Set-Cookie headers to append
     * @return org.apache.http.Header a combined "cookie" header
     **/
    private Header addCookie(String oldCookieValue, Header... newCookie){
        StringBuilder cookieValue = new StringBuilder(oldCookieValue);
        for(Header cookie : newCookie){
            cookieValue.append(cookie.getValue()).append(";");
        }
        return new BasicHeader(COOKIE,cookieValue.toString());
    }

    /***
     * @author β世界 on 23:53 2020/2/10
     * @Description Builds the shared HTTP client with redirect/retry
     * strategies, default request config and the pooling manager.
     **/
    private void initClient(){
        LaxRedirectStrategy redirectStrategy = new LaxRedirectStrategy();
        HttpRequestRetryHandler httpRequestRetryHandler = new HttpRequestRetry();
        client = HttpClients.custom()
                // Redirect and retry strategies.
                .setRedirectStrategy(redirectStrategy)
                .setRetryHandler(httpRequestRetryHandler)
                // Default per-request settings.
                .setDefaultRequestConfig(requestconfig)
                // Shared connection pool.
                .setConnectionManager(poolConnManage)
                .build();
     }

    /**
     * Initializes the page factory with the configured response strategies.
     * @author β世界 on 11:09 2021/5/10
     **/
    private void initPageFactory(){
        List<ResponseStrategy> strategies = spiderConfig.getResponseStrategyArrayList();
        // toArray(new T[0]) is the idiomatic (and on modern JVMs fastest) form.
        pageFactory = new PageFactory(uriQueueScheduler, strategies.toArray(new ResponseStrategy[0]));
    }

    /**
     * Initializes the worker thread pool: core and max size both equal the
     * configured thread number, i.e. at most that many concurrent workers.
     * @author β世界 on 14:14 2021/5/10
     **/
    private void initThreadPool(SpiderConfig spiderConfig){
        executor = new ThreadPool(spiderConfig.getThreadNumber(),spiderConfig.getThreadNumber(),1L);
    }

    /**
     * @author β世界 on 23:51 2020/2/10
     * @Description Builds the request object (GET or POST with form params)
     * with default and user-configured headers.
     * @return org.apache.http.client.methods.HttpRequestBase
     **/
    private HttpRequestBase buildHttpRequest(){
        HttpRequestBase httpType; // request method for this client
        if(spiderConfig.isGet()){
            httpType = new HttpGet();
        }else{
            httpType = new HttpPost();
            // Attach form parameters, if any were configured.
            if(!spiderConfig.getParam().isEmpty()){
                try {
                    UrlEncodedFormEntity urlEncodedFormEntity = new UrlEncodedFormEntity(spiderConfig.getParam(),"UTF-8");
                    ((HttpPost)httpType).setEntity(urlEncodedFormEntity);
                } catch (UnsupportedEncodingException e) {
                    // UTF-8 is always supported; log just in case.
                    logger.warn("Failed to encode POST parameters", e);
                }
            }
        }
        // Browser identification.
        httpType.addHeader("User-Agent", "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.6)");
        // Content type of the request body.
        httpType.addHeader("Content-Type", "application/x-www-form-urlencoded");
        // User-defined headers from the configuration.
        for (int i = 0; i < spiderConfig.getHeaders().size(); i++) {
            Header header = new BasicHeader(spiderConfig.getHeaders().get(i).getName(),spiderConfig.getHeaders().get(i).getValue());
            httpType.addHeader(header);
            logger.debug("request header setting : "+header);
        }
        // Per-request configuration.
        httpType.setConfig(requestconfig);

        return httpType;
    }

    /***
     * @author β世界 on 19:36 2020/2/8
     * @Description Processes the seed URLs:
     * 1. crawls roughly half of the seed pool up front, so the user-defined
     *    logic runs at least once before the worker threads start;
     * 2. moves the remaining seeds into the regular URL pool.
     **/
    public void processSeedUri() {
        HttpRequestBase httpType = buildHttpRequest();
        try {
            logger.info("Process seed Url...");

            // Snapshot the size: the list shrinks as we remove from it, so
            // re-evaluating seedUrl.size() in the condition would process
            // only about a third of the seeds instead of half.
            int initialSize = seedUrl.size();
            for (int index = 0; (index*2) < initialSize; index++) {
                httpType.setURI(seedUrl.remove(0));
                CloseableHttpResponse response = execute(httpType);
                try {
                    processResponse(response,httpType);
                } finally {
                    // Close every response, not just the last one, to avoid
                    // leaking pooled connections.
                    response.close();
                }
            }
            // Release any connection still leased to this request object.
            httpType.releaseConnection();

            // Move the remaining seeds into the regular URL pool.
            while(!seedUrl.isEmpty()){
                uriQueueScheduler.addUrl(seedUrl.remove(0).toString());
            }
            logger.info("Process Seed Url Over...");
        } catch (Exception e){
            logger.warn("Process seed Url failed", e);
        }
    }

    /***
     * @author β世界 on 21:57 2020/2/3
     * @Description Starts the crawl:
     * 1. crawls the seed URLs first (runs the user-defined logic once);
     * 2. spawns the configured number of worker threads;
     * 3. waits for them to finish, then optionally shuts everything down.
     **/
    public void start(){
        // Crawl the seed URLs first.
        processSeedUri();
        // Initialize the thread countdown latch.
        executor.initCountDownLatch(spiderConfig.getThreadNumber());

        // One runnable shared by all worker threads.
        SpiderRun spider = new SpiderRun();
        for (int i = 0; i < spiderConfig.getThreadNumber(); i++) {
            executor.execute(spider);
        }

        // Wait for all workers to count down.
        executor.await();

        // Poll until no worker thread is still active.
        while(executor.checkExistActive()){
            sleep();
        }

        // Log any URLs that were never crawled. Snapshot the size first:
        // getUntreatedUrl() drains the pool, so comparing i against the live
        // size would log only half of the remaining URLs.
        int remaining = uriQueueScheduler.getUntreatedSize();
        for (int i = 0; i < remaining; i++) {
            logger.info("Untreated URL:"+uriQueueScheduler.getUntreatedUrl());
        }
        // Shut down automatically when configured to do so.
        if(spiderConfig.isAutoClose()){
            shutdown();
        }
    }

    /**
     * @author β世界 on 17:14 2020/2/8
     * @Description Executes a request and manually follows 3xx redirects
     * (bounded by {@link #MAX_REDIRECTS}), forwarding Set-Cookie headers.
     * NOTE(review): the client is also built with LaxRedirectStrategy, so in
     * practice most redirects are followed by HttpClient itself — confirm
     * whether this manual handling is still needed.
     *
     * Known exceptions:
     * 1. java.net.UnknownHostException (unknown host)
     * 2. org.apache.http.conn.HttpHostConnectException (connect timeout)
     * 3. java.io.EOFException: SSL peer shut down incorrectly (protocol mismatch)
     *
     * @param httpType: the request to execute
     * @return org.apache.http.HttpResponse the final response
     **/
    public CloseableHttpResponse execute(HttpRequestBase httpType) throws IOException, URISyntaxException {
        CloseableHttpResponse response = client.execute(httpType);
        logger.info(httpType.getURI().toString() + " Request Status: " + response.getStatusLine().getStatusCode());

        // Follow 3xx redirects iteratively (the original recursed without a
        // bound, so a redirect cycle could recurse forever).
        int redirects = 0;
        int responseStatus = response.getStatusLine().getStatusCode();
        while(responseStatus >= HttpStatus.SC_MULTIPLE_CHOICES && responseStatus <= HttpStatus.SC_TEMPORARY_REDIRECT
                && redirects < MAX_REDIRECTS){
            Header header = response.getFirstHeader("location");
            if(header == null){
                // Malformed redirect without a Location header: stop here
                // instead of throwing a NullPointerException.
                break;
            }
            httpType.setURI(new URI(header.getValue()));
            httpType.addHeader("Referer", header.getValue());
            // Forward cookies set by the redirect response.
            if(response.getFirstHeader(SET_COOKIE) != null){
                if(httpType.getFirstHeader(COOKIE) == null){
                    httpType.addHeader(addCookie("",response.getHeaders(SET_COOKIE)));
                }else{
                    httpType.setHeader(addCookie(httpType.getFirstHeader(COOKIE).getValue(),response.getHeaders(SET_COOKIE)));
                }
            }
            // Close the redirect response before following it, otherwise the
            // pooled connection leaks.
            response.close();
            response = client.execute(httpType);
            logger.info(httpType.getURI().toString() + " Request Status: " + response.getStatusLine().getStatusCode());
            responseStatus = response.getStatusLine().getStatusCode();
            redirects++;
        }

        return response;
    }

    /**
     * @author β世界 on 17:52 2021/3/17
     * Processes a response: wraps it into a {@link ResultPage}, dispatches it
     * to the matching {@link ProcessLogic}, then drains the entity so the
     * pooled connection can be reused.
     * @param response: the response to process
     * @param httpType: the request that produced it
     **/
    private void processResponse(CloseableHttpResponse response,HttpRequestBase httpType){
        if(isProcess(response)){
            ResultPage resultPage = pageFactory.createPage(response,httpType);
            resultPage.setResultItem(resultItem);
            getProcessLogic(resultPage).logic(resultPage);
            try {
                // Fully consume the entity so the connection returns to the pool.
                EntityUtils.consume(response.getEntity());
                // Politeness delay between requests.
                Thread.sleep(spiderConfig.getInterval());
            } catch (IOException e) {
                logger.warn("Failed to consume response entity", e);
            } catch (InterruptedException e) {
                // Preserve the interrupt status for callers.
                Thread.currentThread().interrupt();
            }
        }
    }

    /**
     * @author β世界 on 22:26 2021/3/17
     * Shuts down the client, the URL scheduler, the thread pool and the
     * connection pool. Waits up to 30 seconds for active workers first.
     **/
    public void shutdown(){
        logger.info("关闭连接池，检查链接池中是否还有剩余...");
        if(uriQueueScheduler.getUntreatedSize() != 0){
            logger.warn("剩余链接："+uriQueueScheduler.getUntreatedUrl());
        }else{
            logger.info("连接池无剩余。");
        }
        // Clear the URL scheduler.
        uriQueueScheduler.clearProcessed();
        uriQueueScheduler.clearUntreated();

        // Close the HTTP client.
        try {
            client.close();
            logger.info("ClientHttp已关闭");
        } catch (IOException e) {
            logger.info("ClientHttp关闭失败", e);
        }
        // If workers are still active, give them 30 seconds before forcing
        // the shutdown.
        if (executor.checkExistActive()){
            try {
                Thread.sleep(30000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
        // Shut down the thread pool and the connection pool.
        executor.shutdown();
        poolConnManage.close();
        logger.info("ClientHttp连接池关闭");
    }

    /**
     * @author β世界 on 14:42 2021/3/17
     * Decides whether a response should be processed: status 200 or any
     * status on the configured white list.
     * @param response: the response to check (may be null)
     * @return boolean
     **/
    public boolean isProcess(CloseableHttpResponse response){
        if(response != null){
            int responseStatus = response.getStatusLine().getStatusCode();
            return responseStatus == HttpStatus.SC_OK || ResponseStatusWhiteList.isIncludeStatus(responseStatus);
        }
        return false;
    }

    /**
     * Returns the first registered logic that accepts the given result page.
     * @author β世界 on 8:55 2021/5/11
     * @param resultPage: the result page to dispatch
     * @return com.spider.core.ProcessLogic
     * @throws ProcessLogicNotFoundException when no logic accepts the page
     **/
    private ProcessLogic getProcessLogic(ResultPage resultPage){
        for (ProcessLogic processLogic : processLogicList) {
            if(processLogic.isProcess(resultPage)){
                return processLogic;
            }
        }
        throw new ProcessLogicNotFoundException(resultPage.getUrl() + " 没有找相关处理逻辑");
    }

    /**
     * Returns all URLs that have already been processed.
     * @author Administratorr
     * @date 15:42 2021/6/21
     * @return java.util.List<java.net.URI>
     **/
    public List<URI> getProcessURl(){
        return uriQueueScheduler.getProcessURI();
    }

    /**
     * Returns the shared result storage space.
     * @author Administratorr
     * @date 15:58 2021/6/21
     * @return com.spider.page.ResultItem
     **/
    public ResultItem getResultItem(){
        return resultItem;
    }

    /**
     * @author β世界 on 10:25 2021/5/8
     * @Description Sleeps for {@link #SLEEP_TIME} ms, preserving the
     * interrupt status if interrupted.
     **/
    private void sleep(){
        try {
            Thread.sleep(SpiderClient.SLEEP_TIME);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }

    /**
     * Worker runnable: drains the untreated URL pool until it is empty.
     * @author cerberu
     * @date 11:34 2021/8/5
     **/
    class SpiderRun implements Runnable{
        /***
         * @author β世界 on 17:15 2020/2/8
         * @Description Repeatedly takes an untreated URL, sends the request,
         * and processes the response, until the pool returns null. Each
         * response is closed after processing; a failed request is logged
         * and the loop continues with the next URL.
         **/
        @Override
        public void run() {
            HttpRequestBase httpType = buildHttpRequest();
            URI uri;
            // Loop until the untreated pool is drained.
            while ((uri = uriQueueScheduler.getUntreatedUrl()) != null){
                try {
                    httpType.setURI(uri);
                    CloseableHttpResponse response = execute(httpType);
                    try {
                        processResponse(response,httpType);
                    } finally {
                        // Close every response to avoid leaking connections
                        // (the original never closed them in this loop).
                        response.close();
                    }
                } catch (Exception e) {
                    logger.warn("Request Execute Exception:"+httpType.getURI(), e);
                }
            }

            try {
                // Release the request's leased connection. The original
                // called clone() here, which does not release anything.
                httpType.releaseConnection();
            } finally {
                // This worker is done: decrement the thread latch.
                executor.countDown();
                try {
                    Thread.sleep(spiderConfig.getInterval());
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        }
    }

}
