/**
 * Project Name:lee
 * File Name:WebSpider.java
 * Package Name:com.lee.WebSpider
 * Date:2017年1月17日下午5:10:25
 * Copyright (c) 2017, chenzhou1025@126.com All Rights Reserved.
 *
 */

package com.lee.webSpider;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * WebSpider: a simple single-threaded breadth-first web crawler.
 * <p>
 * Starting from {@code startUrl} it downloads pages over plain HTTP,
 * honours each host's {@code robots.txt}, extracts anchor links, and
 * records the URLs of pages whose content contains every
 * whitespace-separated term of {@code searchString}.
 * <p>
 * Only {@code http://} URLs are followed. Not thread-safe: use one
 * instance per crawl.
 *
 * @author dell
 * @since JDK 1.6
 */
public class WebSpider implements Runnable {
	/** Anchor-tag pattern capturing the href value; compiled once instead of per page. */
	private static final Pattern LINK_PATTERN =
			Pattern.compile("(?s)<\\s*?a.*?href\\s*?=\\s*?[\",'](.*?)[\",'].*?>", Pattern.CASE_INSENSITIVE);
	// Per-host cache of disallowed paths parsed from robots.txt.
	private Map<String, ArrayList<String>> disallowListCache = new HashMap<String, ArrayList<String>>();
	List<String> errorList = new ArrayList<String>(); // validation errors reported by crawl()
	List<String> result = new ArrayList<String>(); // URLs whose pages matched the search string
	String startUrl; // crawl entry point
	int maxUrl; // maximum number of URLs to process
	String searchString; // text to search for (every whitespace-separated term must occur)
	boolean caseSensitive = false; // whether matching is case sensitive
	boolean limitHost = false; // whether to restrict the crawl to the start host

	public WebSpider() {
	}

	public WebSpider(String startUrl, int maxUrl, String searchString) {
		this.startUrl = startUrl;
		this.maxUrl = maxUrl;
		this.searchString = searchString;
	}

	/** @return the matched URLs collected so far (live list, filled while the crawl runs). */
	public List<String> getResult() {
		return result;
	}

	/** Runnable entry point: runs the crawl configured via the constructor. */
	public void run() {
		crawl(startUrl, maxUrl, searchString, limitHost, caseSensitive);
	}

	/**
	 * Validates a URL string; only {@code http://} URLs are accepted.
	 *
	 * @param url candidate URL (may be null)
	 * @return the parsed URL, or {@code null} when the string is null,
	 *         not an http:// URL, or malformed
	 */
	private URL verifyUrl(String url) {
		// Null guard added: crawl() and getResult(String) may pass arbitrary input.
		if (url == null || !url.toLowerCase().startsWith("http://")) {
			return null;
		}
		try {
			return new URL(url);
		} catch (MalformedURLException e) {
			return null;
		}
	}

	/**
	 * Checks whether the host's robots.txt allows fetching the given URL.
	 * The parsed disallow list is cached per host.
	 *
	 * @return {@code true} when the URL may be fetched (including when no
	 *         robots.txt exists or it cannot be read)
	 */
	private boolean isRobotAllowed(URL urlToCheck) {
		String host = urlToCheck.getHost().toLowerCase();
		ArrayList<String> disallowList = disallowListCache.get(host);
		// Not cached yet: download robots.txt and build the disallow list.
		if (disallowList == null) {
			disallowList = new ArrayList<String>();
			BufferedReader reader = null; // FIX: original leaked this reader
			try {
				URL robotsFileUrl = new URL("http://" + host + "/robots.txt");
				reader = new BufferedReader(new InputStreamReader(robotsFileUrl.openStream()));
				String line;
				while ((line = reader.readLine()) != null) {
					if (line.indexOf("Disallow:") == 0) { // line declares a disallowed path
						String disallowPath = line.substring("Disallow:".length());
						// Strip a trailing "#" comment, if any.
						int commentIndex = disallowPath.indexOf("#");
						if (commentIndex != -1) {
							disallowPath = disallowPath.substring(0, commentIndex);
						}
						disallowList.add(disallowPath.trim());
					}
				}
				disallowListCache.put(host, disallowList);
			} catch (Exception e) {
				// No robots.txt at the site root (or it is unreadable): allow everything.
				return true;
			} finally {
				if (reader != null) {
					try {
						reader.close();
					} catch (IOException ignored) {
						// best-effort close
					}
				}
			}
		}
		// Disallowed when the URL's path starts with any disallowed prefix.
		String file = urlToCheck.getFile();
		for (int i = 0; i < disallowList.size(); i++) {
			if (file.startsWith(disallowList.get(i))) {
				return false;
			}
		}
		return true;
	}

	/**
	 * Downloads the page at the given URL.
	 *
	 * @return the page text with lines joined by '\n' (the original dropped
	 *         the separators, fusing words across line breaks), or
	 *         {@code null} when the download fails
	 */
	private String downloadPage(URL pageUrl) {
		BufferedReader reader = null; // FIX: original leaked this reader
		try {
			reader = new BufferedReader(new InputStreamReader(pageUrl.openStream()));
			StringBuffer page = new StringBuffer();
			String line;
			while ((line = reader.readLine()) != null) {
				page.append(line).append('\n');
			}
			return page.toString();
		} catch (IOException e) {
			e.printStackTrace();
			return null;
		} finally {
			if (reader != null) {
				try {
					reader.close();
				} catch (IOException ignored) {
					// best-effort close
				}
			}
		}
	}

	/**
	 * Removes a leading "www." from the URL's host so that
	 * http://www.x.com and http://x.com are treated as the same page.
	 */
	private String removeWwwFromUrl(String url) {
		int index = url.indexOf("://www.");
		if (index != -1) {
			url = url.substring(0, index + 3) + url.substring(index + 7);
		}
		return url;
	}

	/**
	 * Parses a page and returns the new, verified absolute http:// links it contains.
	 *
	 * @param pageUrl      URL the content was fetched from (used to resolve relative links)
	 * @param pageContents HTML of the page
	 * @param crawledList  URLs already processed; links present here are skipped
	 * @param limitHost    when true, drop links pointing at a different host
	 * @return the extracted links, resolved, de-fragmented and verified
	 */
	public ArrayList<String> retrieveLinks(URL pageUrl, String pageContents, Set<String> crawledList, boolean limitHost) {
		Matcher matcher = LINK_PATTERN.matcher(pageContents);
		ArrayList<String> linkList = new ArrayList<String>();
		while (matcher.find()) {
			String link = matcher.group(1).trim();
			if (link.length() < 1) { // empty href
				continue;
			}
			if (link.charAt(0) == '#') { // in-page anchor only
				continue;
			}
			if (link.indexOf("mailto:") != -1) { // mail link
				continue;
			}
			// FIX: original compared against a garbled copy-pasted ad snippet;
			// the evident intent was to skip javascript: pseudo-links.
			if (link.toLowerCase().indexOf("javascript:") != -1) {
				continue;
			}
			// FIX: original used "!= -1", resolving absolute URLs and leaving
			// relative links broken; resolution applies when NO scheme is present.
			if (link.indexOf("://") == -1) {
				// Build "http://host[:port]". FIX: original always appended
				// ":" + getPort(), producing "host:-1" when no port is set.
				StringBuffer base = new StringBuffer("http://").append(pageUrl.getHost());
				if (pageUrl.getPort() != -1) {
					base.append(':').append(pageUrl.getPort());
				}
				if (link.charAt(0) == '/') { // host-absolute path
					link = base.append(link).toString();
				} else {
					String file = pageUrl.getFile();
					// FIX: original inverted this test, swapping the two branches.
					if (file.indexOf("/") == -1) { // page sits at the host root
						link = base.append('/').append(link).toString();
					} else { // resolve relative to the page's directory
						String path = file.substring(0, file.lastIndexOf("/") + 1);
						link = base.append(path).append(link).toString();
					}
				}
			}
			// Strip any fragment so equal pages compare equal.
			int index = link.indexOf("#");
			if (index != -1) {
				link = link.substring(0, index);
			}
			link = removeWwwFromUrl(link);
			URL verifiedLink = verifyUrl(link);
			if (verifiedLink == null) {
				continue;
			}
			// When limited to one host, drop links pointing elsewhere.
			if (limitHost && !pageUrl.getHost().toLowerCase().equals(verifiedLink.getHost().toLowerCase())) {
				continue;
			}
			// Skip links that were already processed.
			if (crawledList.contains(link)) {
				continue;
			}
			linkList.add(link);
		}
		return linkList;
	}

	/**
	 * Tests whether the page contains every whitespace-separated term of
	 * the search string.
	 *
	 * @param caseSensitive when false, both page and terms are lower-cased
	 * @return {@code true} when all terms occur in the page
	 */
	private boolean searchStringMatchs(String pageContents, String searchString, boolean caseSensitive) {
		String searchContents = pageContents;
		String target = searchString;
		if (!caseSensitive) {
			searchContents = pageContents.toLowerCase();
			// FIX: original lower-cased only the page, so mixed-case search
			// strings could never match in case-insensitive mode.
			target = searchString.toLowerCase();
		}
		// FIX: original pattern "[//s]+" matched the literal characters '/'
		// and 's' instead of whitespace.
		String[] terms = target.split("\\s+");
		for (int i = 0; i < terms.length; i++) {
			if (terms[i].length() == 0) { // leading whitespace yields an empty first term
				continue;
			}
			if (searchContents.indexOf(terms[i]) == -1) {
				return false;
			}
		}
		return true;
	}

	/**
	 * Performs the crawl: breadth-first from {@code startUrl}, up to
	 * {@code maxUrls} pages, collecting the URLs whose content matches
	 * {@code searchString}.
	 *
	 * @return the matched URLs, or the validation-error list when the
	 *         arguments are invalid
	 */
	public List<String> crawl(String startUrl, int maxUrls, String searchString, boolean limitHost, boolean caseSensitive) {
		Set<String> crawledList = new HashSet<String>(); // already processed
		Set<String> toCrawlList = new LinkedHashSet<String>(); // pending, FIFO order
		if (maxUrls < 1) {
			errorList.add("Invalid Max URLs value.");
		}
		// Null guard added; also fixes the "Stirng" typo in the message.
		if (searchString == null || searchString.length() < 1) {
			errorList.add("Missing Search String.");
		}
		if (errorList.size() > 0) {
			return errorList;
		}
		toCrawlList.add(startUrl);
		while (toCrawlList.size() > 0) {
			if (maxUrls != -1 && crawledList.size() >= maxUrls) {
				break;
			}
			// Take the oldest pending URL.
			String url = toCrawlList.iterator().next();
			toCrawlList.remove(url);
			URL verifyUrl = verifyUrl(url);
			// FIX: original dereferenced a possibly-null URL in isRobotAllowed().
			if (verifyUrl == null || !isRobotAllowed(verifyUrl)) {
				continue;
			}
			crawledList.add(url);
			String pageContents = downloadPage(verifyUrl);
			// FIX: original passed a possibly-null page into retrieveLinks().
			if (pageContents == null) {
				continue;
			}
			toCrawlList.addAll(retrieveLinks(verifyUrl, pageContents, crawledList, limitHost));
			if (searchStringMatchs(pageContents, searchString, caseSensitive)) {
				result.add(url);
			}
		}
		return result;
	}

	/**
	 * Fetches the content of a single URL via URLConnection.
	 *
	 * @param url an http:// URL
	 * @return the page content, or the empty string when the URL is
	 *         invalid or the fetch fails
	 */
	public String getResult(String url) {
		StringBuffer result = new StringBuffer();
		BufferedReader in = null;
		try {
			URL realUrl = verifyUrl(url);
			// FIX: original called openConnection() on a possibly-null URL.
			if (realUrl == null) {
				return "";
			}
			URLConnection connection = realUrl.openConnection();
			connection.connect();
			in = new BufferedReader(new InputStreamReader(connection.getInputStream()));
			String line;
			while ((line = in.readLine()) != null) {
				// FIX: original used String += in a loop (quadratic copying).
				result.append(line);
			}
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			try {
				if (in != null) {
					in.close();
				}
			} catch (Exception e2) {
				e2.printStackTrace();
			}
		}
		return result.toString();
	}

	public static void main(String[] args) {
		WebSpider webSpider = new WebSpider("http://www.baidu.com", 20, "java");
		Thread thread = new Thread(webSpider);
		thread.start();
	}

}
