package com.eric.r2d2.pageProcessor;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.TimeZone;
import java.util.UUID;

import org.apache.commons.lang.StringUtils;
import org.dom4j.DocumentHelper;
import org.dom4j.Element;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.eric.utils.TextUtils;
import com.eric.utils.UrlUtils;
import com.eric.utils.page.ArticleExtractor;
import com.eric.utils.page.IR;

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.processor.PageProcessor;

public class NewsProcessor implements PageProcessor {

	/** Desktop Chrome user agent sent with every crawl request. */
	private static final String UA = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31";

	private final Site site = Site.me().setUserAgent(UA);

	Properties p = new Properties();

	private final Logger logger = LoggerFactory.getLogger(getClass());

	/** Seed URLs; only links whose root domain matches one of these are followed. */
	private final List<String> approvedUrlList;

	/** URL patterns; a candidate link must match at least one to be queued. */
	private final Set<String> urlPattern;

	String EXCLUDE_FILE_REGEX;
	int PAGE_TEXT_COUNT_LOWER_LIMIT;
	int URL_LENGTH_LIMIT;
	int LINKS_LIMIT;

	/**
	 * Loads crawl limits from {@code resources/SEO_INFO_SETTINGS} and records the
	 * approved seed URLs and URL patterns.
	 *
	 * @param approvedUrlList seed URLs defining the approved root domains
	 * @param urlPattern      URL patterns a link must match to be crawled
	 * @throws IllegalStateException if a required numeric property is missing
	 */
	public NewsProcessor(List<String> approvedUrlList, Set<String> urlPattern) {
		// try-with-resources closes the stream even when load() fails
		// (the original code leaked the FileInputStream).
		try (FileInputStream in = new FileInputStream(new File("resources/SEO_INFO_SETTINGS"))) {
			p.load(in);
		} catch (FileNotFoundException e) {
			logger.error("SEO INFO SETTING File Not Found", e);
		} catch (IOException e) {
			logger.error("Failed to read SEO INFO SETTINGS", e);
		}

		PAGE_TEXT_COUNT_LOWER_LIMIT = requiredInt("PAGE_TEXT_COUNT_LOWER_LIMIT");
		URL_LENGTH_LIMIT = requiredInt("URL_LENGTH_LIMIT");
		EXCLUDE_FILE_REGEX = p.getProperty("EXCLUDE_FILE_REGEX");
		LINKS_LIMIT = requiredInt("LINKS_LIMIT");

		this.approvedUrlList = approvedUrlList;
		this.urlPattern = urlPattern;
	}

	/**
	 * Reads a required integer property, failing with the offending key name
	 * instead of the bare NullPointerException the original code threw when the
	 * settings file was missing or incomplete.
	 *
	 * @param key property name
	 * @return the parsed integer value
	 * @throws IllegalStateException if the property is absent
	 * @throws NumberFormatException if the property is not a valid integer
	 */
	private int requiredInt(String key) {
		String value = p.getProperty(key);
		if (value == null) {
			throw new IllegalStateException("Missing required property: " + key + " in SEO_INFO_SETTINGS");
		}
		return Integer.parseInt(value.trim());
	}

	/**
	 * Queues approved outgoing links, and if the page is an article page,
	 * extracts title/content/description/date into a Solr {@code <doc>} XML
	 * fragment stored under the "results" field.
	 *
	 * @param page the downloaded page supplied by WebMagic
	 */
	public void process(Page page) {
		// Add approved URLs to the crawl queue.
		List<String> requests = new ArrayList<String>();
		List<String> urls = page.getHtml().links().regex(".+").all();
		for (String s : urls) {
			// Strip query parameters once and reuse (original stripped twice).
			String stripped = UrlUtils.stripParas(s);
			if (approvedUrl(stripped)) {
				requests.add(stripped);
			}
		}
		page.addTargetRequests(requests);

		String url = UrlUtils.stripParas(page.getUrl().toString()); // drop query parameters
		Document doc = Jsoup.parse(page.getHtml().toString());
		if (IR.isArticlePage(doc)) {
			// urlPattern.add(UrlUtils.stripParas(url));
			// keep the clean URL but do not extract a pattern from it

			String title = doc.title();
			UUID uuid = UUID.randomUUID();

			ArticleExtractor ex = new ArticleExtractor(doc);
			String content = TextUtils.simplifyText(ex.extract());

			String description = doc.select("meta[name=description]").attr("content");

			// Build the Solr <doc> entry: url, contenttitle, docno, content,
			// description, optional article date, and the crawl timestamp.
			org.dom4j.Document xml = DocumentHelper.createDocument();
			Element docEntry = xml.addElement("doc");

			Element field_url = docEntry.addElement("field");
			field_url.addAttribute("name", "url");
			field_url.setText(url);

			Element field_contenttitle = docEntry.addElement("field");
			field_contenttitle.addAttribute("name", "contenttitle");
			field_contenttitle.setText(title);

			Element field_docno = docEntry.addElement("field");
			field_docno.addAttribute("name", "docno");
			field_docno.setText(uuid.toString());

			Element field_content = docEntry.addElement("field");
			field_content.addAttribute("name", "content");
			field_content.setText(content);

			Element field_description = docEntry.addElement("field");
			field_description.addAttribute("name", "description");
			field_description.setText(description);

			// Article publication date is optional; skip the field when absent.
			String date = getSolrDate(IR.getTime(doc));
			if (StringUtils.isNotBlank(date)) {
				Element field_date = docEntry.addElement("field");
				field_date.addAttribute("name", "date");
				field_date.setText(date);
			}

			Element field_crawldate = docEntry.addElement("field");
			field_crawldate.addAttribute("name", "crawldate");
			field_crawldate.setText(getSolrDate(new Date()));

			// Strip the XML declaration; Solr expects a bare <doc> fragment.
			page.putField("results", xml.asXML().replaceAll("<\\?xml version=\"1.0\" encoding=\"UTF-8\"\\?>\n", ""));
			page.putField("url", url);
		} else {
			logger.info("{} Contains No Article!", url);
		}
	}

	/**
	 * Decides whether a URL may be crawled: its root domain must match one of
	 * the approved seed URLs, it must contain no '#' or '?' characters, and it
	 * must match at least one of the configured URL patterns.
	 *
	 * @param strUrl candidate URL (query parameters already stripped)
	 * @return true if the URL is approved for crawling
	 */
	public boolean approvedUrl(String strUrl) {
		List<String> approvedRoots = new ArrayList<String>();
		for (String s : approvedUrlList) {
			approvedRoots.add(UrlUtils.getRoot(s));
		}
		String targetRoot = UrlUtils.getRoot(strUrl);

		boolean t = false;
		// Root domain approved AND no fragment/query markers in the URL.
		if (approvedRoots.contains(targetRoot) && StringUtils.containsNone(strUrl, "#?")) {
			for (String s : urlPattern) {
				if (UrlUtils.patternMatches(strUrl, s)) { // any single match suffices
					t = true;
					break;
				}
			}
		}
		return t;
	}

	/**
	 * Converts a java.util.Date into the Solr UTC timestamp format
	 * {@code yyyy-MM-dd'T'HH:mm:ss'Z'}.
	 *
	 * <p>Uses a single formatter with an explicit UTC zone. The original
	 * implementation formatted the date part in the JVM default timezone and
	 * only the time part in UTC, which could report the wrong calendar day
	 * near midnight in non-UTC zones.
	 *
	 * @param date instant to format
	 * @return the UTC timestamp string accepted by Solr date fields
	 */
	public static String getSolrDate(Date date) {
		SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
		sdf.setTimeZone(TimeZone.getTimeZone("UTC"));
		return sdf.format(date);
	}

	public Site getSite() {
		return site;
	}

}
