package edu.hit.crawler.mapred;

import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.log4j.Logger;

import edu.hit.crawler.DocReader;
import edu.hit.crawler.ParserDriver;
import edu.hit.crawler.io.CrawItem;
import edu.hit.crawler.io.FetchOutput;
import edu.hit.crawler.util.HtmlParser;
import edu.hit.crawler.util.URLCanonicalizer;
import edu.hit.crawler.util.URLFilter;

/**
 * Mapper for the Parser Job.<br/>
 * Parses each document read from the latest docs folder and extracts its
 * outlinks using the {@link HtmlParser} class.
 * 
 * @author zzc (zzc3615@gmail.com)
 * 
 */
public class ParseMapper extends Mapper<CrawItem, FetchOutput, Text, CrawItem> {
	public static Logger logger = Logger.getLogger(ParseMapper.class);

	public static HtmlParser parser = null;
	// Matches the charset declaration inside an HTML <meta> tag, e.g.
	// <meta charset="utf-8"> or <meta http-equiv=... content="...; charset=gbk">.
	// Compiled in setup(); see the pattern there.
	public static Pattern p = null;

	// Progress-report state. Instance fields (previously static) so that
	// concurrent mapper instances in one JVM do not share counters.
	private long start = System.currentTimeMillis();
	private int count = 0;

	String uiservlet;     // URL of the UI servlet that receives progress reports
	int reportInterval;   // minimum milliseconds between progress reports
	boolean isSendInfo;   // whether progress reporting is enabled

	/**
	 * Initializes the HTML parser, the charset-sniffing pattern and the
	 * progress-reporting configuration.
	 */
	@Override
	public void setup(Context context) {
		parser = new HtmlParser();
		// Allow optional quotes and hyphenated charset names ("utf-8",
		// "iso-8859-1"). The previous pattern used (\w+) only, which could
		// match neither hyphenated names nor quoted values.
		p = Pattern.compile("<meta[^>]*?charset\\s*=\\s*[\"']?([\\w-]+)",
				Pattern.DOTALL | Pattern.CASE_INSENSITIVE);
		uiservlet = context.getConfiguration().get("org.work.crawler.ui.servlet");
		reportInterval = context.getConfiguration().getInt("org.work.crawler.reportInterval", 2 * 1000);
		isSendInfo = context.getConfiguration().getBoolean("org.work.crawler.isSendInfo", false);
	}

	/**
	 * Parses one fetched document: records fetch failures, decodes the page
	 * using its declared charset (default UTF-8), extracts and canonicalizes
	 * outlinks, and emits the page plus every accepted outlink.
	 *
	 * @param key     crawl record of the requested URL; updated and re-emitted
	 * @param value   fetch result (final URL, status code, raw content bytes)
	 * @param context Hadoop task context used for output and status display
	 */
	@Override
	public void map(CrawItem key, FetchOutput value, Context context)
			throws IOException, InterruptedException {

		count++;
		long now = System.currentTimeMillis();
		if (isSendInfo && now - start > reportInterval) {
			reportProgress();
			count = 0;
			start = now;
		}

		String realURL = value.getUrl();
		// TODO: filter url and don't add the url whose host is in the forbidden host table.
		if (!URLFilter.filter(realURL))
			return;

		// Fetch failed: record the failure status for this URL and stop.
		if (value.getCode() != FetchOutput.SUCCESS) {
			key.setStatus(value.getCode());
			context.write(new Text(realURL), key);
			return;
		}

		byte[] content = value.getContent();
		if (content == null) {
			// Defensive: treat a missing body as an empty page.
			content = new byte[0];
		}

		// 由content的meta信息中提取编码信息 / extract the charset from the meta tag,
		// falling back to UTF-8.
		String charset = detectCharset(content);
		String text = new String(content, charset);

		// Show the URL currently being parsed in the Hadoop task status.
		context.setStatus(realURL);

		List<String> outlinks = new ArrayList<String>();
		int out = 0;

		for (String link : parser.extract(realURL, text)) {
			out++;
			// url过滤 / drop links rejected by the URL filter
			if (!URLFilter.filter(link)) {
				continue;
			}
			link = URLCanonicalizer.getCanonicalURL(link);
			if (null == link || link.length() == 0)
				continue;
			outlinks.add(link);
		}

		// 原链接跳转 / redirect handling, e.g. www.souhu.com -> www.sohu.com.
		// Always key the record by the resolved URL; when there was no
		// redirect, setUrl(realURL) is a no-op, so both cases collapse into
		// one path (the old if/else branches were otherwise identical).
		key.setUrl(realURL);
		key.setOutlinks(out);
		key.setStatus(CrawItem.SUCCESS);
		context.write(new Text(realURL), key);

		// Emit every extracted link as an uncrawled candidate for the CrawlDb.
		for (String url : outlinks) {
			context.write(new Text(url), new CrawItem(url, CrawItem.UNCRAWL));
		}
	}

	/**
	 * POSTs the current parse count to the UI servlet. Best-effort: failures
	 * are logged and never fail the task.
	 */
	private void reportProgress() {
		DefaultHttpClient httpclient = new DefaultHttpClient();
		try {
			String uri = uiservlet + "?state=parsing&count=" + Integer.toString(count);
			HttpPost post = new HttpPost(uri);
			HttpResponse response = httpclient.execute(post);
			logger.info("progress report: " + response.getStatusLine());
		} catch (Exception e) {
			logger.warn("failed to report parse progress to " + uiservlet, e);
		} finally {
			// Release the connection even when execute() throws; the old code
			// leaked it on failure.
			httpclient.getConnectionManager().shutdown();
		}
	}

	/**
	 * Detects the page charset from a &lt;meta&gt; tag within the first 2 KB
	 * of {@code content}.
	 *
	 * @param content raw page bytes (never null)
	 * @return a charset name supported by this JVM; "utf-8" when no valid
	 *         declaration is found
	 */
	private String detectCharset(byte[] content) {
		int len = Math.min(2048, content.length);
		// Decode the sniffed prefix with ISO-8859-1 (byte-transparent) rather
		// than the platform default charset, so the ASCII meta tag is found
		// regardless of the JVM's locale settings.
		String header = new String(content, 0, len, Charset.forName("ISO-8859-1"));
		String charset = null;
		Matcher m = p.matcher(header);
		if (m.find()) {
			charset = m.group(1);
			try {
				if (charset == null || charset.trim().equals("")
						|| !Charset.isSupported(charset)) {
					charset = null;
				}
			} catch (Exception e) {
				// Illegal charset name (IllegalCharsetNameException). The old
				// code left the bad name in place, which made the later
				// new String(content, charset) throw and fail the whole task.
				charset = null;
			}
		}
		// 设定默认编码 / default encoding when none was found or it was invalid.
		return (charset == null) ? "utf-8" : charset;
	}

}
