package mylibrary.spider;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * A minimal breadth-first web crawler restricted to the {@code http://} scheme.
 * Starting from a seed URL it downloads pages, extracts anchor links, honours
 * per-host {@code robots.txt} Disallow rules, and returns the set of URLs visited.
 *
 * <p>Not thread-safe; intended for single-threaded use.
 */
public class WebCrawlerMe {

	// Compiled once: matches <a href="..."> in a case-insensitive way.
	// NOTE: the character class ["|>] also matches a literal '|' (kept for
	// behavioural compatibility with the original expression).
	private static final Pattern LINK_PATTERN =
			Pattern.compile("<a\\s+href\\s*=\\s*\"?(.*?)[\"|>]", Pattern.CASE_INSENSITIVE);

	// Cache of robots.txt "Disallow" path prefixes, keyed by lower-case host.
	private HashMap<String, List<String>> disallowListCache = new HashMap<String, List<String>>();
	// Validation messages collected before crawling starts.
	private List<String> errorList = new ArrayList<String>();
	// Reserved for search hits; currently never populated (search is disabled).
	private List<String> result = new ArrayList<String>();
	private String startUrl;
	private int maxUrl;
	boolean limitHost = false;

	/**
	 * @param startUrl     seed URL; only http:// URLs are crawled
	 * @param maxUrl       maximum number of pages to visit, or -1 for unlimited
	 * @param searchString currently unused; kept for interface compatibility
	 */
	public WebCrawlerMe(String startUrl, int maxUrl, String searchString) {
		this.startUrl = startUrl;
		this.maxUrl = maxUrl;
	}

	/** @return the (currently always empty) list of search results */
	public List<String> getResult() {
		return result;
	}

	/**
	 * Parse and validate a URL string; only the HTTP scheme is accepted.
	 *
	 * @param url candidate URL text
	 * @return the parsed {@link URL}, or {@code null} when the text is not a
	 *         well-formed {@code http://} URL
	 */
	private URL verifyUrl(String url) {
		if (!url.toLowerCase().startsWith("http://")) {
			return null;
		}
		try {
			return new URL(url);
		} catch (Exception e) {
			// Malformed URL is an expected condition for scraped links:
			// treat as "not crawlable" instead of dumping a stack trace.
			return null;
		}
	}

	/**
	 * Check the host's robots.txt to decide whether {@code urlToCheck} may be
	 * crawled. Disallow lists are cached per host. On any fetch/parse failure
	 * the URL is permitted (fail-open, matching the original behaviour).
	 *
	 * @param urlToCheck already-verified URL (must not be null)
	 * @return true when no Disallow rule prefixes the URL's file part
	 */
	private boolean isAllowedInRobot(URL urlToCheck) {
		String host = urlToCheck.getHost().toLowerCase();
		List<String> disallowList = disallowListCache.get(host);
		if (disallowList == null) {
			disallowList = new ArrayList<String>();
			try {
				URL robotsFileUrl = new URL("http://" + host + "/robots.txt");
				// try-with-resources: the original leaked this reader.
				try (BufferedReader reader = new BufferedReader(
						new InputStreamReader(robotsFileUrl.openStream(), StandardCharsets.UTF_8))) {
					String line;
					while ((line = reader.readLine()) != null) {
						if (line.indexOf("Disallow:") == 0) {
							String disallowPath = line.substring("Disallow:".length());
							// Strip a trailing "# comment" from the rule.
							int commentIndex = disallowPath.indexOf("#");
							if (commentIndex != -1) {
								disallowPath = disallowPath.substring(0, commentIndex);
							}
							disallowList.add(disallowPath.trim());
						}
					}
				}
				disallowListCache.put(host, disallowList);
			} catch (Exception e) {
				// robots.txt unreachable or unreadable: allow crawling.
				return true;
			}
		}
		String file = urlToCheck.getFile();
		for (String disallowPath : disallowList) {
			if (file.startsWith(disallowPath)) {
				return false;
			}
		}
		return true;
	}

	/**
	 * Download the page body as a single string.
	 * Lines are joined with '\n' so tokens on adjacent source lines are not
	 * fused together (the original concatenated lines with no separator).
	 *
	 * @param pageUrl already-verified URL (must not be null)
	 * @return page text read so far; "" (or a partial page) on I/O failure
	 */
	private String downloadPage(URL pageUrl) {
		StringBuilder pageBuffer = new StringBuilder();
		try (BufferedReader reader = new BufferedReader(
				new InputStreamReader(pageUrl.openStream(), StandardCharsets.UTF_8))) {
			String line;
			while ((line = reader.readLine()) != null) {
				pageBuffer.append(line).append('\n');
			}
		} catch (Exception e) {
			// Best effort: unreachable pages simply yield whatever was read.
		}
		return pageBuffer.toString();
	}

	/**
	 * Remove a leading "www." from the host part of a URL,
	 * e.g. {@code http://www.a.com/x} becomes {@code http://a.com/x}.
	 */
	private String removeWwwFromUrl(String url) {
		int index = url.indexOf("://www.");
		if (index != -1) {
			return url.substring(0, index + 3) + url.substring(index + 7);
		}
		return url;
	}

	/**
	 * Extract crawlable absolute http:// links from a page.
	 * Skips empty links, fragments, mailto:, javascript:, links to other
	 * hosts when {@code limitHost} is set, and links already crawled.
	 *
	 * @param pageUrl      page the content came from (used for the host check)
	 * @param pageContents raw HTML text
	 * @param crawledList  URLs already visited, used for de-duplication
	 * @param limitHost    when true, only links on pageUrl's host are kept
	 * @return list of link URLs, fragment parts removed
	 */
	private List<String> retrieveLinks(URL pageUrl, String pageContents,
			HashSet<String> crawledList, boolean limitHost) {

		Matcher m = LINK_PATTERN.matcher(pageContents);

		List<String> linkList = new ArrayList<String>();
		while (m.find()) {
			String link = m.group(1).trim();
			if (link.length() < 1) {
				continue;
			}
			if (link.charAt(0) == '#') {
				continue; // pure in-page anchor
			}
			if (link.indexOf("mailto:") != -1) {
				continue;
			}
			if (link.toLowerCase().indexOf("javascript") != -1) {
				continue;
			}

			// Drop the fragment so "page#a" and "page#b" are one URL.
			int index = link.indexOf("#");
			if (index != -1) {
				link = link.substring(0, index);
			}

			URL verifiedLink = verifyUrl(link);
			if (verifiedLink == null) {
				continue; // relative or non-http links are not followed
			}

			// Restored: honour the limitHost flag (the original silently ignored it).
			if (limitHost && !pageUrl.getHost().toLowerCase()
					.equals(verifiedLink.getHost().toLowerCase())) {
				continue;
			}

			// Restored: skip links already crawled to avoid re-visiting.
			if (crawledList.contains(link)) {
				continue;
			}
			linkList.add(link);
		}

		return linkList;
	}

	/**
	 * Breadth-first crawl from {@code startUrl}.
	 *
	 * @param startUrl      seed URL
	 * @param maxUrls       page limit, or -1 for unlimited
	 * @param searchString  currently unused
	 * @param limitHost     when true, never leave the seed's host
	 * @param caseSensitive currently unused
	 * @return set of URLs actually crawled (empty when validation fails)
	 */
	private Set<String> crawl(String startUrl, int maxUrls, String searchString,
			boolean limitHost, boolean caseSensitive) {

		HashSet<String> crawledList = new HashSet<String>();
		LinkedHashSet<String> toCrawlList = new LinkedHashSet<String>();

		// BUG FIX: the original added this error unconditionally (the guarding
		// "if" was commented out), so every crawl reported "err!!!".
		if (maxUrls != -1 && maxUrls < 1) {
			errorList.add("Invalid Max Url value");
		}

		if (errorList.size() > 0) {
			System.out.println("err!!!");
			return crawledList; // abort: nothing crawled
		}

		toCrawlList.add(startUrl);

		while (toCrawlList.size() > 0) {
			if (maxUrls != -1 && crawledList.size() >= maxUrls) {
				break;
			}

			// Take the oldest pending URL (LinkedHashSet keeps insertion order).
			String url = toCrawlList.iterator().next();
			toCrawlList.remove(url);

			// BUG FIX: skip URLs already visited; the original could re-crawl
			// them forever when maxUrls == -1.
			if (crawledList.contains(url)) {
				continue;
			}

			// BUG FIX: the original passed a possibly-null URL straight into
			// isAllowedInRobot, causing a NullPointerException.
			URL verifiedUrl = verifyUrl(url);
			if (verifiedUrl == null) {
				continue;
			}

			if (!isAllowedInRobot(verifiedUrl)) {
				continue;
			}
			crawledList.add(url);
			String pageContent = downloadPage(verifiedUrl);

			if (pageContent != null && pageContent.length() > 0) {
				List<String> links = retrieveLinks(verifiedUrl, pageContent, crawledList, limitHost);
				toCrawlList.addAll(links);

				for (String link : links) {
					System.out.println(link);
				}
			}
		}

		return crawledList;
	}

	public static void main(String[] args) throws Exception {
		WebCrawlerMe t = new WebCrawlerMe("http://www.163.com/", 10000, "111");
		t.crawl(t.startUrl, t.maxUrl, "", t.limitHost, false);
	}
}














