package com.yp.test;

import java.util.ArrayList;
import java.util.HashSet;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

public class WebCrawler {
	// Every URL ever seen (queued or crawled). HashSet gives O(1) dedup,
	// so size() at the end is the count of distinct pages crawled.
	HashSet<String> allurlSet = new HashSet<String>();
	// URLs queued but not yet crawled. Dedup is enforced in addUrl(), so
	// this list never holds duplicates. FIFO via remove(0).
	ArrayList<String> notCrawlurlSet = new ArrayList<String>();
	int threadCount = 1; // number of worker threads TODO make configurable
	// Number of workers currently parked in signal.wait(). Written under the
	// `signal` lock by workers; volatile so main()'s unsynchronized poll sees it.
	volatile int count = 0;
	public static final Object signal = new Object(); // monitor parking idle workers

	public static void main(String[] args) {
		final WebCrawler wc = new WebCrawler();
		wc.addUrl("https://www.baidu.com");// replace with the real seed URL TODO
		long start = System.currentTimeMillis();
		System.out.println("开始爬虫.........................................");
		wc.begin();
		while (true) {
			// Done when the queue is drained AND every worker is parked waiting.
			// (count is only == threadCount while all workers are idle, because
			// each worker decrements it as soon as it wakes — see begin().)
			if (wc.notCrawlurlSet.isEmpty() && wc.count == wc.threadCount) {
				long end = System.currentTimeMillis();
				System.out.println("总共爬了" + wc.allurlSet.size() + "个网页");
				System.out.println("总共耗时" + (end - start) / 1000 + "秒");
				System.exit(0); // normal completion → exit status 0, not 1
			}
			try {
				Thread.sleep(100); // poll instead of busy-spinning a CPU core
			} catch (InterruptedException e) {
				Thread.currentThread().interrupt(); // preserve interrupt status
				return;
			}
		}
	}

	// Start threadCount worker threads that drain the URL queue.
	private void begin() {
		for (int i = 0; i < threadCount; i++) {
			new Thread(new Runnable() {
				public void run() {
					while (true) {
						String tmp = getAUrl();
						if (tmp != null) {
							crawler(tmp);
						} else {
							// Queue empty: park until addUrl() notifies us.
							synchronized (signal) {
								try {
									count++;
									System.out.println("当前有" + count + "个线程在等待");
									signal.wait();
								} catch (InterruptedException e) {
									Thread.currentThread().interrupt(); // preserve interrupt status
									return; // treat interruption as a stop request
								} finally {
									// Must be undone on every exit from wait(),
									// or main()'s termination check fires early.
									count--;
								}
							}
						}
					}
				}
			}, "thread-" + i).start();
		}
	}

	// Pop the next URL to crawl, or null if the queue is empty.
	// [strategy: consume-and-remove — simple under multithreading]
	public synchronized String getAUrl() {
		if (notCrawlurlSet.isEmpty()) {
			return null;
		}
		String tmpAUrl;
		tmpAUrl = notCrawlurlSet.get(0);
		notCrawlurlSet.remove(0);
		return tmpAUrl;
	}

	// Enqueue a URL for crawling. Duplicates (already seen) are ignored, and
	// idle workers are woken so newly added work is actually picked up.
	public synchronized void addUrl(String url) {
		// HashSet.add returns false for duplicates — gate the queue on it so
		// each URL is crawled at most once.
		if (allurlSet.add(url)) {
			notCrawlurlSet.add(url);
			synchronized (signal) {
				signal.notifyAll(); // wake parked workers: new work is available
			}
		}
	}

	// Fetch the page at sUrl; parsing links / persistence still TODO.
	public void crawler(String sUrl) {
		try {
			// Fetch the page (the "editor's picks" page in the original design)
			Document document = Jsoup.connect(sUrl)
					// spoof a browser user-agent so sites don't reject us
					.userAgent("Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)").get();
			// extract data, discover links via addUrl(), and persist TODO
			System.out.println("爬网页" + sUrl + " 是由线程" + Thread.currentThread().getName() + "来爬");
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

}