package com.wjd.spider;

import com.wjd.parser.PageAnalyzer;
import com.wjd.parser.PageContent;
import com.wjd.parser.PageHandler;
import com.wjd.parser.PageInfo;
import com.wjd.store.Store;
import org.jsoup.nodes.Document;

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.*;

/**
 * A multi-threaded web crawler organised as a three-stage pipeline:
 * fetch (URL -> jsoup Document -> PageContent), parse (PageContent -> PageInfo),
 * and store (PageInfo -> persistent store). Stages communicate through unbounded
 * blocking queues and each stage runs on its own fixed-size thread pool.
 */
public class Spider {

    /** Number of worker threads in each pipeline stage. */
    private static final int WORKERS_PER_STAGE = 10;

    /** URLs already scheduled for crawling, mapped to their link depth (seeds are depth 0). */
    private final ConcurrentHashMap<String, Integer> urls = new ConcurrentHashMap<>();

    /** URLs waiting to be downloaded. */
    private final BlockingQueue<String> urlQueue = new LinkedBlockingQueue<>();
    /** Downloaded page content waiting to be analyzed. */
    private final BlockingQueue<PageContent> pageQueue = new LinkedBlockingQueue<>();
    /** Analyzed page data waiting to be persisted. */
    private final BlockingQueue<PageInfo> dataQueue = new LinkedBlockingQueue<>();

    private final ExecutorService fetcherExecutors = Executors.newFixedThreadPool(WORKERS_PER_STAGE);
    private final ExecutorService parserExecutors = Executors.newFixedThreadPool(WORKERS_PER_STAGE);
    private final ExecutorService storeExecutors = Executors.newFixedThreadPool(WORKERS_PER_STAGE);

    /** Initial URLs to crawl from. */
    private final List<String> seeds;
    /** Maximum link depth to follow beyond the seeds; links at this depth are not expanded. */
    private final int maxLevel;

    /**
     * Creates a spider that crawls the seed pages and follows their links one level deep.
     *
     * @param seeds initial URLs to crawl
     */
    public Spider(List<String> seeds) {
        this(seeds, 1);
    }

    /**
     * Creates a spider with an explicit crawl depth limit.
     *
     * @param seeds    initial URLs to crawl
     * @param maxLevel maximum link depth to follow (seeds are depth 0)
     */
    public Spider(List<String> seeds, int maxLevel) {
        this.seeds = seeds;
        this.maxLevel = maxLevel;
    }

    /**
     * Starts the worker pools for all three stages and enqueues the seed URLs
     * at depth 0. Duplicate seeds are enqueued only once.
     */
    public void start() {
        for (int i = 0; i < WORKERS_PER_STAGE; i++) {
            fetcherExecutors.submit(new FetchTask());
            parserExecutors.submit(new ParseTask());
            storeExecutors.submit(new StoreTask());
        }

        for (String url : seeds) {
            try {
                // putIfAbsent returns null only for a URL that has not been seen before,
                // so each URL enters the queue at most once.
                if (urls.putIfAbsent(url, 0) == null) {
                    urlQueue.put(url);
                }
            } catch (InterruptedException e) {
                // Restore the interrupt status instead of swallowing it
                // (the original only printed the stack trace and kept looping).
                Thread.currentThread().interrupt();
                break;
            }
        }
    }

    /** Stops all pipeline stages by interrupting their blocked workers. */
    public void stop() {
        fetcherExecutors.shutdownNow();
        parserExecutors.shutdownNow();
        // Bug fix: storeExecutors was never shut down, leaving non-daemon
        // threads alive and preventing the JVM from exiting.
        storeExecutors.shutdownNow();
    }

    public static void main(String[] args) {
        List<String> seeds = Arrays.asList(
                "http://www.sina.com.cn/"
        );
        new Spider(seeds).start();
    }

    /**
     * Fetch stage: takes a URL off the queue, downloads and pre-processes the page,
     * hands the content to the parse stage, and schedules newly discovered links
     * (one level deeper) that have not been seen before.
     */
    private class FetchTask implements Runnable {

        @Override
        public void run() {
            while (true) {
                try {
                    // 1. Take the next URL to download (blocks until one is available).
                    String url = urlQueue.take();
                    System.out.println("----fetch url----" + url);
                    // 2. Download the page for this URL.
                    Document doc = new Downloader().download(url);
                    // 3. Extract content and outgoing links from the document.
                    PageHandler pageHandler = new PageHandler();
                    PageContent content = pageHandler.handle(url, doc);
                    // 4. Hand the content to the parse stage.
                    pageQueue.put(content);
                    // 5. Schedule unseen links, one level deeper than this page.
                    Integer level = urls.getOrDefault(url, 0);
                    if (level < maxLevel && content.getLinks() != null) {
                        for (String link : content.getLinks()) {
                            Integer val = urls.putIfAbsent(link, level + 1);
                            if (val == null) {
                                urlQueue.put(link);
                            }
                        }
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                } catch (Exception e) {
                    // Best-effort crawl: log the failure and move on to the next URL.
                    e.printStackTrace();
                }
            }
        }

    }

    /**
     * Parse stage: takes downloaded page content, runs Chinese word segmentation /
     * analysis on it, and hands the resulting page info to the store stage.
     */
    private class ParseTask implements Runnable {

        @Override
        public void run() {
            while (true) {
                try {
                    // 1. Take the next downloaded page (blocks until one is available).
                    PageContent content = pageQueue.take();
                    System.out.println("++++parse url++++" + content.getUrl());
                    // 2. Analyze the page content (Chinese word segmentation).
                    PageAnalyzer pageAnalyzer = new PageAnalyzer();
                    PageInfo pageInfo = pageAnalyzer.analyze(content);
                    // 3. Hand the analyzed data to the store stage.
                    dataQueue.put(pageInfo);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                } catch (Exception e) {
                    // Best-effort crawl: log the failure and move on to the next page.
                    e.printStackTrace();
                }
            }
        }

    }

    /** Store stage: persists analyzed page info via the shared {@link Store} singleton. */
    private class StoreTask implements Runnable {

        @Override
        public void run() {
            while (true) {
                try {
                    PageInfo pageInfo = dataQueue.take();
                    Store.getInstance().insert(pageInfo);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                } catch (Exception e) {
                    // Best-effort: log the failure and keep storing subsequent pages.
                    e.printStackTrace();
                }
            }
        }
    }

}
