package edu.hit.crawler.util;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLDecoder;
import java.util.Collections;
import java.util.List;
import java.util.StringTokenizer;

import org.apache.log4j.Logger;

/**
 * 从配置文件中读取网站爬取规则 
 * @author
 */
public class RobotRulesParser {

	public static final Logger LOG = Logger.getLogger(RobotRulesParser.class);

	private static final String CHARACTER_ENCODING = "UTF-8";
	private static final RuleSet EMPTY_RULES = new RuleSet();
	private static final RuleSet FORBID_ALL_RULES = getForbidAllRules();
	
	private static final int Max_Redirect = 5;

	private static RuleSet getForbidAllRules() {

		RuleSet rules = new RuleSet();
		rules.addRule("", false);	//如果因为403的原因导致robots.txt得不到，那么本站就禁止爬取
		return rules;
	}

	/**
	 * 根据url来获得RuleSet
	 */
	public RuleSet getRobotsRules(URL url) {

		RuleSet rule = null;

		if(rule == null) {
			rule = getRobotsRuleFromWeb(url);
		}		

		//排序，将allow规则排在前面，优先匹配
		List<RobotsEntry> robotRules = rule.getRules();
		Collections.sort(robotRules);
		rule.setRules(robotRules);
		
		return rule;
	}

	/**
	 * 从网络上下载网页对应主机的允许采集的规则
	 * @param url 网页对应的URL
	 * @return 网页对应的主机的采集规则，如果未找到则默认允许采集
	 */
	private RuleSet getRobotsRuleFromWeb(URL url) {
		String host = url.getHost().toLowerCase();
		URL redir = null;
		RuleSet rule = null;
		try {

			HttpURLConnection conn = (HttpURLConnection) new URL("http://"
					+ host + "/robots.txt").openConnection();
			conn.setConnectTimeout(60000);
			conn.setReadTimeout(60000);
			conn.setInstanceFollowRedirects(false);
			conn.setRequestProperty(
					"user-agent",
					"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 2.0.50727; Maxthon 2.0)");

			int i = 0;
			while ((conn.getResponseCode() == 301 || conn.getResponseCode() == 302)
					&& i < Max_Redirect) {
				String redirection = conn.getHeaderField("Location");
				if (redirection == null)
					redirection = conn.getHeaderField("Location");
				if (!redirection.endsWith("robots.txt")) {
					return EMPTY_RULES;
				}
				if (redirection != null) {
					if (!redirection.startsWith("http"))
						// RFC says it should be absolute, but apparently it isn't
						redir = new URL(url, redirection);
					else
						redir = new URL(redirection);
				}

				conn = (HttpURLConnection) redir.openConnection();
				conn.setConnectTimeout(60000);
				conn.setReadTimeout(60000);
				i++;
			}

			if (conn.getResponseCode() == 200) {

				char[] buffer = new char[2048];
				StringBuffer pageBuffer = new StringBuffer();
				BufferedReader reader = new BufferedReader(
						new InputStreamReader(conn.getInputStream()));
				int length = 0;
				length = reader.read(buffer);
				while (length != -1) {
					pageBuffer.append(buffer, 0, length);
					length = reader.read(buffer);
				}
				rule = parseRules(pageBuffer.toString());
			} else if (conn.getResponseCode() == 403) {
				// use forbid all
				rule = FORBID_ALL_RULES;
			} else if (conn.getResponseCode() >= 500) {
				rule = EMPTY_RULES;
			} else {
				rule = EMPTY_RULES;
			}
		} catch (Throwable t) {
			LOG.info("Couldn't get robots.txt for " + url + ": " + t.toString());
			rule = EMPTY_RULES;
		}
		return rule;
	}

	public RuleSet parseRules(String robotContent) {
		if (robotContent == null || "".equals(robotContent)) {
			return EMPTY_RULES;
		}

		RuleSet rule = new RuleSet();
		StringTokenizer lineParser = new StringTokenizer(robotContent, "\n\r");
		boolean findAgent = false; // 是否找到User-agent: *的规则

		while (lineParser.hasMoreTokens()) {
			String line = lineParser.nextToken().toLowerCase();
			// 去除robot.txt中的注释
			int hashPos = line.indexOf("#");
			if (hashPos >= 0)
				line = line.substring(0, hashPos);

			line = line.trim();
			if ((line.length() >= 11)
					&& line.substring(0, 11).equalsIgnoreCase("User-agent:")) {
				String agentNames = line.substring(line.indexOf(":") + 1)
						.trim();
				if (agentNames.equals("*")) {
					findAgent = true;
					break;
				}
			}
		}
		if (findAgent) {
			while (lineParser.hasMoreTokens()) {

				String line = lineParser.nextToken().toLowerCase();
				// 取出robot.txt中的注释
				int hashPos = line.indexOf("#");
				if (hashPos >= 0)
					line = line.substring(0, hashPos);
				line = line.trim();

				if ((line.length() >= 9)
						&& (line.substring(0, 9).equalsIgnoreCase("Disallow:"))) {

					String path = line.substring(line.indexOf(":") + 1).trim();
					try {
						path = URLDecoder.decode(path, CHARACTER_ENCODING);
					} catch (UnsupportedEncodingException e) {
						System.err.println("error parsing robots rules-can't decode path: " + path);
					}
					if (0 == path.length()) {
						rule.clearRules();
					} else {
						// 规则不为空的话
						rule.addRule(path, false);
					}

				} else if ((line.length() >= 6)
						&& (line.substring(0, 6).equalsIgnoreCase("Allow:"))) {

					String path = line.substring(line.indexOf(":") + 1).trim();
					try {
						path = URLDecoder.decode(path, CHARACTER_ENCODING);
					} catch (UnsupportedEncodingException e) {
						System.err.println("error parsing robots rules-can't decode path: " + path);
					}
					if (0 == path.length()) {
						rule.clearRules();
					} else
						// 规则不为空的话
						rule.addRule(path, true);
				} else if ((line.length() >= 11)
						&& line.substring(0, 11).equalsIgnoreCase("User-agent:"))
					break;
			}
		}
		return rule;
	}

	public boolean isAllowed(URL url) {
		String path = url.getPath();
		if ((path == null) || "".equals(path)) {
			path = "/";
		}
		return getRobotsRules(url).isAllowed(path);
	}

	public static void main(String[] args) throws Exception {
		RobotRulesParser robotParser = new RobotRulesParser();

		FileInputStream fin = new FileInputStream("url.txt");
		BufferedReader br = new BufferedReader(new InputStreamReader(fin));
		String s;
		while ((s = br.readLine()) != null) {
			System.out.println("url :" + s + " " + robotParser.isAllowed(new URL(s)));
			System.out.println("================");
		}
		br.close();
		fin.close();
	}
}