package cn.tyoui.httpclient;

import cn.tyoui.pojo.ProxyIP;
import cn.tyoui.pojo.StatusCode;
import org.apache.commons.io.FileUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * 爬虫网页
 *
 * @author Tyoui
 */
public class HttpCrawler {

    private CloseableHttpClient httpClient = HttpClients.createDefault();

    // Pool of proxy servers; null means crawl directly without a proxy.
    private List<ProxyIP> list = null;

    // Directory where crawled pages are saved (created by setDir).
    private String dir = null;

    // Characters illegal in file names, replaced by '_' in fileSuffix().
    public String FILE_REGEX = "[?\"<>!:*]";

    // Shared Random for proxy selection and sleep jitter; the original
    // allocated a new Random on every call.
    private final Random random = new Random();

    /**
     * Initializes the proxy pool from a text file containing one
     * {@code ip:port} entry per line. Blank lines are skipped.
     *
     * @param proxyText path of the proxy list file
     * @throws Exception if the file cannot be read or an entry is malformed
     */
    public void proxyInit(String proxyText) throws Exception {
        list = new ArrayList<>();
        List<String> listIP = FileUtils.readLines(new File(proxyText));
        for (String str : listIP) {
            String line = str.trim();
            if (line.isEmpty())
                continue;
            // Split once and validate; the original split twice and threw a
            // bare ArrayIndexOutOfBoundsException on malformed lines.
            String[] parts = line.split(":");
            if (parts.length != 2)
                throw new IllegalArgumentException("Malformed proxy entry: " + line);
            list.add(new ProxyIP(parts[0], Integer.parseInt(parts[1])));
        }
    }

    /**
     * Crawls a single URL and saves the response body into {@code dir}.
     * If the target file already exists the URL is skipped. After the
     * request, sleeps a randomized interval to throttle crawling.
     *
     * @param webURL the URL to crawl
     * @param min    minimum delay in milliseconds between two crawls
     * @param max    maximum extra random delay in milliseconds (0 disables sleeping)
     * @throws Exception if crawling or sleeping fails
     */
    public void startCrawler(String webURL, int min, int max) throws Exception {
        String fileSuffix = webURL.substring(webURL.lastIndexOf("/") + 1);
        fileSuffix = fileSuffix(fileSuffix);
        String path = dir + File.separator + fileSuffix;
        File file = new File(path);
        if (file.exists())
            return; // already downloaded, don't fetch again
        if (getList() == null || list.isEmpty()) {
            crawler(webURL, path, null, 0);
        } else {
            // nextInt(size) covers every proxy; the original nextInt(size - 1)
            // could never pick the last proxy and crashed (bound 0) when the
            // pool held exactly one entry.
            int index = random.nextInt(list.size());
            crawler(webURL, path, list.get(index), index);
        }
        if (max > 0)
            Thread.sleep((long) random.nextInt(max) + min);
    }

    /**
     * Executes one GET request, optionally through a proxy, and writes the
     * response body to {@code path}. Failures are logged, not rethrown
     * (best-effort semantics preserved from the original).
     *
     * @param url   URL to fetch
     * @param path  file path where the response body is saved
     * @param proxy proxy to route through, or {@code null} for a direct connection
     * @param index position of {@code proxy} in the pool, used to evict it on failure
     * @throws IOException if closing the response fails
     */
    private void crawler(String url, String path, ProxyIP proxy, int index) throws IOException {
        HttpGet httpGet = new HttpGet(url);
        httpGet.setHeader("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8");
        httpGet.setHeader("Accept-Encoding", "gzip, deflate, sdch");
        httpGet.setHeader("Accept-Language", "zh-CN,zh;q=0.8");
        httpGet.setHeader("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36");
        RequestConfig requestConfig;
        if (proxy == null) {
            requestConfig = RequestConfig.custom().build();
        } else {
            HttpHost httpHost = new HttpHost(proxy.getIp(), proxy.getPort());
            requestConfig = RequestConfig.custom().setProxy(httpHost).build();
        }
        httpGet.setConfig(requestConfig);
        CloseableHttpResponse response = null;
        try {
            response = httpClient.execute(httpGet);
            int status = response.getStatusLine().getStatusCode();
            if ((status >= 200 && status < 300) || status == 404) {
                HttpEntity entity = response.getEntity();
                // try-with-resources: the original leaked this stream, which
                // can keep the file locked and lose buffered bytes.
                try (FileOutputStream out = new FileOutputStream(path)) {
                    entity.writeTo(out);
                }
                System.out.println("下载成功！" + url);
            } else {
                // The proxy produced a bad status code — evict it from the pool.
                if (list != null)
                    list.remove(index);
                throw new IOException(StatusCode.getStatus(status));
            }
        } catch (Exception e) {
            System.err.println(e + "\t" + url);
        } finally {
            // The original mistakenly called httpGet.clone() here (the javadoc
            // even documented it as "closing"); release the connection instead
            // so it is returned to the connection manager.
            httpGet.releaseConnection();
            if (response != null)
                response.close();
        }
    }

    /**
     * Sets the directory where crawled pages are saved, creating it
     * (including parents) if it does not exist.
     *
     * @param dir target directory path
     */
    public void setDir(String dir) {
        this.dir = dir;
        File file = new File(dir);
        if (!file.exists())
            file.mkdirs();
    }

    /**
     * Closes the underlying HTTP client. Call once crawling is finished.
     */
    public void close() {
        try {
            httpClient.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Returns the proxy pool.
     *
     * @return the proxy IP list, or {@code null} if none was configured
     */
    public List<ProxyIP> getList() {
        return list;
    }

    /**
     * Sets the proxy pool.
     *
     * @param list proxy IP list
     */
    public void setList(List<ProxyIP> list) {
        this.list = list;
    }

    /**
     * Sanitizes a file name: replaces characters matching {@link #FILE_REGEX}
     * and backslashes with '_', truncates to 250 characters, and appends
     * {@code .html} if missing.
     *
     * @param suffix raw file name candidate
     * @return a legal file name ending in {@code .html}
     */
    public String fileSuffix(String suffix) {
        // FILE_REGEX is a mutable public field, so compile per call on purpose.
        Pattern pattern = Pattern.compile(FILE_REGEX);
        Matcher matcher = pattern.matcher(suffix);
        while (matcher.find()) {
            String name = matcher.group();
            suffix = suffix.replace(name, "_");
        }
        if (suffix.contains("\\"))
            suffix = suffix.replace("\\", "_");
        if (suffix.length() >= 250)
            suffix = suffix.substring(0, 250);
        if (!suffix.endsWith(".html"))
            suffix += ".html";
        return suffix;
    }
}

