package wang.ddcat.reptilian_plus.util;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;
import java.net.UnknownHostException;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import wang.ddcat.reptilian_plus.constant.UserAgentConstant;
import wang.ddcat.reptilian_plus.link.Links;
import wang.ddcat.reptilian_plus.page.Page;

/**
 * Simple multi-threaded web crawler. Seed URLs are queued via {@link #start},
 * then {@code threadCount} worker threads drain the queue, fetching each page
 * and collecting the results in {@link #pages}. {@code start} blocks until all
 * workers are idle (queue drained) and then returns the collected pages.
 */
public class WebCrawler {

    private static final Logger logger = LoggerFactory.getLogger(WebCrawler.class);

    // Shared result set; synchronized wrapper because multiple worker threads
    // append crawled pages concurrently (plain ArrayList is not thread-safe).
    public static List<Page> pages = Collections.synchronizedList(new ArrayList<>());

    // User-Agent string sent with every request.
    private String user_agent = UserAgentConstant.PC_Chrome;
    // Referer header value: tells the server which page linked here.
    private String referer = "";
    // Whether the connection may use cached responses.
    private boolean use_caches = false;
    // Connect/read timeout in milliseconds (default 5000 ms).
    private int timeout = 5000;
    // Charset used to decode the response body and re-encode it for Page.
    private String charset_name = "UTF-8";
    // Whether to clear the previously visited-URL set before starting.
    private boolean removeVisitedUrlSet = true;
    // Number of worker threads (default 10).
    private int threadCount = 10;
    // Number of worker threads currently parked in wait(); volatile so the
    // monitor loop in start() reliably observes updates made by the workers
    // (the original relied on a System.out.print("") side effect for this).
    private volatile int waitCount = 0;
    // URLs whose host could not be resolved; atomic because it is incremented
    // from multiple worker threads concurrently.
    private final AtomicInteger invalidURLCount = new AtomicInteger();

    // Monitor used for inter-thread signalling: idle workers wait here and are
    // notified whenever new URLs are enqueued.
    public static final Object signal = new Object();

    /**
     * Seeds the crawl queue with the given URLs, starts the worker threads and
     * blocks until the crawl is finished.
     *
     * @param urls seed URLs to crawl
     * @return the shared list of successfully crawled pages
     */
    public List<Page> start(String... urls) {
        if (removeVisitedUrlSet) {
            Links.removeVisitedUrlSet(urls); // forget previously visited URLs
        }
        this.addUrl(urls); // seed the to-crawl queue
        long start = System.currentTimeMillis();
        logger.info("·················· ( 开始爬虫 ) ··················");
        this.begin();
        // Poll the termination condition with a short sleep instead of the
        // original CPU-burning busy-wait.
        while (true) {
            if ((Links.unVisitedUrlQueueIsEmpty() && Thread.activeCount() == 1)
                    || this.waitCount == this.threadCount) {
                long end = System.currentTimeMillis();
                logger.info("总共爬了 ( " + Links.getVisitedUrlNum() + " ) 个网页 . [失败(" + invalidURLCount.get() + ")个网站:因为找不到host]");
                logger.info("总共耗时: " + (end - start) + " 毫秒");
                return pages;
            }
            try {
                Thread.sleep(50);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // preserve interrupt status
                return pages;
            }
        }
    }

    /**
     * Starts {@code threadCount} daemon worker threads. Each worker repeatedly
     * takes a URL from the queue and crawls it; when the queue is empty it
     * parks on {@link #signal} until new URLs are enqueued.
     */
    private void begin() {
        for (int i = 0; i < threadCount; i++) {
            Thread worker = new Thread(new Runnable() {
                public void run() {
                    while (true) {
                        String next = getAUrl();
                        if (next != null) {
                            crawler(next);
                        } else {
                            synchronized (signal) {
                                try {
                                    waitCount++;
                                    logger.info("当前有 ( " + waitCount + " ) 个线程在等待");
                                    signal.wait();
                                    // Woken because new URLs were enqueued:
                                    // leave the idle count before resuming
                                    // (the original never decremented it).
                                    waitCount--;
                                } catch (InterruptedException e) {
                                    Thread.currentThread().interrupt();
                                    logger.error(e.getMessage(), e);
                                    return;
                                }
                            }
                        }
                    }
                }
            }, "thread-" + i);
            // Daemon threads: the JVM can still exit even though idle workers
            // remain parked in wait() after start() returns.
            worker.setDaemon(true);
            worker.start();
        }
    }

    /**
     * Atomically takes the next unvisited URL off the queue.
     *
     * @return head of the queue, or {@code null} when the queue is empty
     */
    private synchronized String getAUrl() {
        if (Links.unVisitedUrlQueueIsEmpty()) {
            return null;
        }
        // Remove the head so no other worker crawls the same URL twice.
        return (String) Links.removeHeadOfUnVisitedUrlQueue();
    }

    /**
     * Adds the given URLs to the to-crawl queue and wakes any idle workers.
     */
    private synchronized void addUrl(String... urls) {
        for (String url : urls) {
            Links.addUnvisitedUrlQueue(url);
        }
        // Wake workers parked in begin(); the original never notified, so a
        // parked worker could sleep forever even with work available.
        synchronized (signal) {
            signal.notifyAll();
        }
    }

    /**
     * Downloads a single page and appends it to {@link #pages}. Unknown hosts
     * are counted as permanent failures; other I/O errors requeue the URL for
     * a retry. The URL is recorded as visited in every case.
     *
     * @param visitUrl absolute URL to fetch
     */
    private void crawler(String visitUrl) {
        try {
            URL url = new URL(visitUrl);
            URLConnection conn = url.openConnection();
            conn.setUseCaches(use_caches);
            conn.setRequestProperty("accept", "*/*");
            conn.setRequestProperty("connection", "Keep-Alive");
            conn.setRequestProperty("user-agent", user_agent);
            // Correct header name is "Referer"; the original sent
            // "RefererConstant", which servers simply ignore.
            conn.setRequestProperty("referer", referer);
            conn.setConnectTimeout(timeout);
            conn.setReadTimeout(timeout); // bound the read too, not just the connect
            conn.connect();
            StringBuilder sb = new StringBuilder();
            // try-with-resources closes the stream even on exceptions
            // (the original leaked the reader on every call).
            try (BufferedReader in = new BufferedReader(
                    new InputStreamReader(conn.getInputStream(), charset_name))) {
                String line;
                while ((line = in.readLine()) != null) {
                    sb.append(line); // NOTE: line separators are dropped, as before
                }
            }
            // Encode with the configured charset; the original used the platform
            // default charset, corrupting non-ASCII content on some systems.
            byte[] bytes = sb.toString().getBytes(charset_name);
            pages.add(new Page(bytes, visitUrl, conn.getContentType()));
        } catch (UnknownHostException e) {
            // Host could not be resolved (DNS failure — not an HTTP 404).
            invalidURLCount.incrementAndGet();
        } catch (IOException e) {
            // Transient failure: requeue the URL and wake an idle worker to retry.
            Links.addUnvisitedUrlQueue(visitUrl);
            synchronized (signal) {
                signal.notifyAll();
            }
        } finally {
            // Record the URL as visited regardless of outcome.
            Links.addVisitedUrlSet(visitUrl);
        }
    }

    /** Sets the User-Agent request header. */
    public WebCrawler user_agent(String user_agent) {
        this.user_agent = user_agent;
        return this;
    }

    /** Sets the Referer request header (the page we claim to come from). */
    public WebCrawler referer(String referer) {
        this.referer = referer;
        return this;
    }

    /** Sets the connect/read timeout in milliseconds. */
    public WebCrawler timeout(Integer timeout) {
        this.timeout = timeout;
        return this;
    }

    /** Sets whether cached responses may be used. */
    public WebCrawler use_caches(boolean use_caches) {
        this.use_caches = use_caches;
        return this;
    }

    /** Sets the number of worker threads (default 10). */
    public WebCrawler threadCount(Integer threadCount) {
        this.threadCount = threadCount;
        return this;
    }

    /** Sets whether to clear previously visited URLs before starting. */
    public WebCrawler removeVisitedUrlSet(boolean removeVisitedUrlSet) {
        this.removeVisitedUrlSet = removeVisitedUrlSet;
        return this;
    }
}