package com.welsmann.app.start.spider.process;

import java.io.IOException;
import java.text.ParseException;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.horrabin.horrorss.RssFeed;
import org.horrabin.horrorss.RssItemBean;
import org.horrabin.horrorss.RssParser;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;

import com.welsmann.app.start.spider.entity.Newspider;
import com.welsmann.app.start.spider.service.NewsService;

/**
 * 信息抓取 — RSS news spider. Loads all crawl rules, parses each configured
 * feed, and stores every article newer than the latest saved post of its type.
 * 
 * @author Welsmann
 * 
 */
public class Spider extends BaseSpider {

	private static final Log log = LogFactory.getLog(Spider.class);

	/** Fallback author shown when a feed item carries no author at all. */
	private static final String DEFAULT_AUTHOR = "网络";

	/** Rule sentinel meaning "use the RSS description directly; do not crawl the article page". */
	private static final String NO_RULE = "##NORULE##";

	/** Articles whose extracted text is this short (or shorter) are treated as noise and skipped. */
	private static final int MIN_CONTENT_LENGTH = 40;

	/** Fallback "latest post" timestamp for a news type that has no stored articles yet. */
	private static final String INITIAL_POST_DATE = "2013-03-15 00:00:00";

	/**
	 * Maps a host fragment found in the raw author field to a display name.
	 * Built in a static initializer instead of double-brace initialization,
	 * which would create an anonymous subclass holding hidden references.
	 */
	private static final Map<String, String> DEFAULT_AUTHOR_MAP;
	static {
		Map<String, String> m = new HashMap<String, String>();
		m.put("sina.com.cn", "新浪网");
		DEFAULT_AUTHOR_MAP = Collections.unmodifiableMap(m);
	}

	/**
	 * Crawl entry point: read rules --> parse each feed --> store new articles.
	 * Each rule is processed independently; a failure in one feed does not
	 * abort the others.
	 */
	@Override
	public void run() {
		List<Newspider> lstSpider = NewsService.getAllNewspider();
		if (lstSpider == null || lstSpider.isEmpty()) {
			log.error("没有取得任何抓取规则，本次抓取进程已终止。");
			return;
		}
		for (Newspider ns : lstSpider) {
			// The watermark is looked up per type; articles at or before it are skipped.
			Date latestPost = NewsService.getLatestPostDate(ns.getType());
			if (latestPost == null) {
				try {
					// sdf is inherited from BaseSpider — presumably a
					// "yyyy-MM-dd HH:mm:ss" SimpleDateFormat; TODO confirm it is
					// not shared across threads (SimpleDateFormat is not thread-safe).
					latestPost = sdf.parse(INITIAL_POST_DATE);
				} catch (ParseException e) {
					// Previously the NPE-prone null watermark was kept; now the
					// rule is skipped so item.getPubDate().after(...) cannot NPE.
					log.error("无法解析默认的最近发布时间，已跳过该抓取规则。", e);
					continue;
				}
			}
			fetch(ns, latestPost);
		}
	}

	/**
	 * Loads one RSS feed and processes every item in it.
	 *
	 * @param newspider  the crawl rule (source URL, CSS rule, news type)
	 * @param latestPost watermark; only articles published after it are crawled
	 */
	private void fetch(Newspider newspider, Date latestPost) {
		RssParser parser = new RssParser(newspider.getSource());
		parser.setCharset("utf-8");
		try {
			RssFeed feed = parser.load();
			List<RssItemBean> lstItem = feed.getItems();
			if (lstItem == null || lstItem.isEmpty()) {
				log.error("没有解析到任何有效的文章信息，解析的网站为" + newspider.getSource());
				return;
			}
			for (RssItemBean item : lstItem) {
				processItem(newspider, item, latestPost);
			}
		} catch (Exception ex) {
			log.error("解析网站数据的时候发生了错误，解析的网站为" + newspider.getSource()
					+ "，错误堆栈如下：", ex);
		}
	}

	/**
	 * Handles a single feed item: resolves the author, then either saves the
	 * RSS description directly (NO_RULE) or crawls the article page for items
	 * newer than the watermark. Exceptions are caught here so one broken
	 * article no longer aborts the remainder of the feed.
	 */
	private void processItem(Newspider newspider, RssItemBean item, Date latestPost) {
		String title = item.getTitle();
		try {
			String time = sdf.format(item.getPubDate());
			String link = item.getLink();
			String author = resolveAuthor(item.getAuthor());
			if (NO_RULE.equals(newspider.getRule())) {
				saveIfLongEnough(title, time, author, item.getDescription(), newspider, link);
			} else if (item.getPubDate().after(latestPost)) {
				String content = extractContent(newspider.getRule(), link);
				if (content != null) {
					saveIfLongEnough(title, time, author, content, newspider, link);
				}
			} else {
				log.info(String.format("新闻[%s]已被跳过", title));
			}
		} catch (Exception ex) {
			log.error(String.format("新闻[%s]抓取失败，错误堆栈如下：", title), ex);
		}
	}

	/**
	 * Crawls the article page and extracts its body HTML.
	 *
	 * @param rule CSS selector; an embedded '%' (not at position 0) splits it
	 *             into "content-selector%ad-selector", and the matched ad
	 *             snippet is stripped from the content
	 * @param link article URL
	 * @return extracted HTML, or {@code null} if the content selector matched nothing
	 * @throws IOException if the article page cannot be fetched
	 */
	private String extractContent(String rule, String link) throws IOException {
		// agent is inherited from BaseSpider — the User-Agent header to send.
		Document article = Jsoup.connect(link).userAgent(agent).timeout(30000).get();
		// adv is scoped per article; previously a stale ad snippet from an
		// earlier article leaked into later ones.
		String adv = "";
		Elements es;
		if (rule.indexOf("%") > 0) {
			String[] rules = rule.split("%");
			es = article.select(rules[0]);
			Elements ads = article.select(rules[1]);
			if (ads != null && !ads.isEmpty()) {
				adv = ads.first().html();
			}
		} else {
			es = article.select(rule);
		}
		if (es == null || es.isEmpty()) {
			return null;
		}
		String content = es.first().html();
		if (!isEmpty(adv)) {
			// replace(), not replaceAll(): the ad snippet is arbitrary HTML and
			// must be matched literally, never interpreted as a regex.
			content = content.replace(adv, "");
		}
		return content;
	}

	/**
	 * Maps the raw author string to a display name: empty -> DEFAULT_AUTHOR;
	 * containing a known host fragment -> its mapped name; otherwise unchanged.
	 */
	private String resolveAuthor(String rawAuthor) {
		if (isEmpty(rawAuthor)) {
			return DEFAULT_AUTHOR;
		}
		String lower = rawAuthor.toLowerCase();
		for (Map.Entry<String, String> entry : DEFAULT_AUTHOR_MAP.entrySet()) {
			if (lower.contains(entry.getKey().toLowerCase())) {
				return entry.getValue();
			}
		}
		return rawAuthor;
	}

	/** Persists the article unless its content is missing or too short to be worth keeping. */
	private void saveIfLongEnough(String title, String time, String author,
			String content, Newspider newspider, String link) {
		if (!isEmpty(content) && content.length() > MIN_CONTENT_LENGTH) {
			NewsService.saveNews(title, time, author, content, newspider.getType(), link);
			log.info(String.format("新闻[%s]已成功保存", title));
		} else {
			log.debug(String.format("新闻[%s]内容过短，不进行采集", title));
		}
	}

	/** @return {@code true} if the string is {@code null} or only whitespace */
	private boolean isEmpty(String str) {
		return str == null || str.trim().length() == 0;
	}
}
