package com.ihunanren.common.spider;

import static com.google.common.base.Preconditions.checkNotNull;

import java.net.MalformedURLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

import com.google.common.base.Throwables;
import com.google.common.collect.Maps;

/**
 * Multi-threaded breadth-first web spider skeleton. Starting from the base URL
 * in {@link WebSpiderParam}, worker threads drain a shared URL queue, hand each
 * page to {@link #doHandle}, and enqueue newly discovered links up to the
 * configured depth. Subclasses implement the lifecycle callbacks.
 *
 * @param <T> opaque caller context passed back to every callback
 * @author kalman03
 */
public abstract class AbstractWebSipder<T>{

	/** URLs discovered but not yet crawled (shared work queue). */
	private final List<String> unVisitedUrls = Collections.synchronizedList(new ArrayList<String>());
	/** URLs already seen (queued or crawled); guards against re-crawling. */
	private final Set<String> visitedUrls = Collections.synchronizedSet(new HashSet<String>());
	/** Crawl depth of every known URL; concurrent map because worker threads read/write it. */
	private final Map<String, Integer> urlsDeep = Maps.newConcurrentMap();

	/** Number of new links successfully discovered and queued. */
	private final AtomicLong success = new AtomicLong();
	/** Number of pages whose crawl threw an exception. */
	private final AtomicLong failed = new AtomicLong();

	/** Invoked once before crawling starts. */
	public abstract void onStart(WebSpiderParam param,T t);
	/** Invoked for every crawled page that contains HTTP links. */
	public abstract void doHandle(WebPage webPage,T t);
	/** Invoked once when the crawl finishes, with the final counters. */
	public abstract void onEnd(long success,long failed,T t);
	/** Invoked whenever crawling a page throws. */
	public abstract void onException(Throwable e,T t);


	private final WebSpiderParam param;
	private final T t;

	/**
	 * @param param crawl configuration; must be non-null and pass {@code param.check()}
	 * @param t     opaque context handed back to every callback
	 */
	public AbstractWebSipder(final WebSpiderParam param,T t) {
		this.param = checkNotNull(param,"WebSpiderParam can't be null!");
		param.check();
		this.t = t;
	}

	/**
	 * Atomically takes the next URL off the work queue.
	 * <p>
	 * The emptiness check lives inside this synchronized method: the old
	 * unguarded {@code get(0)} threw {@code IndexOutOfBoundsException} when two
	 * workers raced past an external {@code isEmpty()} check.
	 *
	 * @return the next URL, or {@code null} when the queue is drained
	 */
	private synchronized String getAUnProcessedUrl() {
		if (unVisitedUrls.isEmpty()) {
			return null;
		}
		return unVisitedUrls.remove(0);
	}

	/**
	 * Crawls every page reachable from the configured base URL, up to the
	 * configured depth, using {@code param.getThread()} worker threads. Blocks
	 * until the crawl completes, then fires {@link #onEnd}.
	 */
	public void doSpider() {
		try {
			String url = UrlHelper.formatLink(param.getBaseUrl());
			// Record the depth BEFORE the URL becomes visible to workers,
			// so no worker can ever observe a queued URL without a depth.
			urlsDeep.put(url, 1);
			visitedUrls.add(url);
			unVisitedUrls.add(url);

			onStart(param,t);
			for (int i = 0; i < param.getThread(); i++) {
				new Thread(new Processer(this)).start(); // start crawler workers
			}
			// Wait for the workers to finish and the queue to drain.
			// NOTE(review): Thread.activeCount() == 1 assumes no unrelated
			// threads live in this thread group — fragile, but kept for
			// compatibility with existing callers.
			while (true) {
				if (Thread.activeCount() == 1 && unVisitedUrls.isEmpty()) {
					onEnd(success.get(), failed.get(),t);
					break;
				}
				try {
					Thread.sleep(50); // was a hot busy-wait pegging one core
				} catch (InterruptedException ie) {
					Thread.currentThread().interrupt(); // restore interrupt status
					break;
				}
			}
		} catch (Exception e) {
			onException(e,t);
			throw Throwables.propagate(e);
		}
	}

	/**
	 * Crawls a single page: passes it to {@link #doHandle} and enqueues every
	 * previously unseen matching link for the worker threads.
	 * <p>
	 * Links are only queued now; the old recursive call crawled each link on
	 * the current thread as well, so every page was handled twice.
	 */
	private void getWebpageByLink(String baseLink) {
		try {
			Integer tempDeep = urlsDeep.get(baseLink);
			// Null-guard: skip URLs whose depth is unknown or beyond the limit.
			if (tempDeep == null || tempDeep > param.getDeep()) {
				return;
			}
			WebPage webPage = new WebPage(baseLink,tempDeep);
			if (webPage.hasHttpLinks()) {
				doHandle(webPage,t);
				Set<String> allLinks = webPage.getMatchedLinkList(param.getLinkRegex());
				for (String link : allLinks) {
					String url = UrlHelper.formatLink(link);
					if (markVisitedIfNew(url)) {
						// Depth must be recorded before the URL is published
						// to the work queue (see doSpider).
						urlsDeep.put(url, tempDeep + 1);
						unVisitedUrls.add(url);
						success.incrementAndGet();
					}
				}
			}
		} catch (Exception e) {
			// Was decrementAndGet(), which drove the failure counter negative.
			failed.incrementAndGet();
			onException(e,t);
		}
	}

	/**
	 * Atomically marks {@code link} as visited.
	 * <p>
	 * Relies on {@link Set#add} returning {@code false} for duplicates; the old
	 * separate contains-then-add allowed two threads to queue the same URL.
	 *
	 * @return {@code true} if the link had not been seen before
	 */
	private boolean markVisitedIfNew(String link){
		return visitedUrls.add(link);
	}

	/** Worker thread body: drains the URL queue until it is empty. */
	class Processer implements Runnable {
		final AbstractWebSipder<?> spider;

		public Processer(AbstractWebSipder<?> spider) {
			this.spider = spider;
		}

		@Override
		public void run() {
			String url;
			while ((url = getAUnProcessedUrl()) != null) {
				getWebpageByLink(url);
			}
		}
	}
}
