package com.sentiment.crawler;

import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.regex.Pattern;

import org.apache.log4j.PropertyConfigurator;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.sentiment.config.Config;
import com.sentiment.config.ConfigCrawler;
import com.sentiment.contentextractor.ContentExtractor;
import com.sentiment.contentextractor.News;
import com.sentiment.database.bean.CrawlNews;
import com.sentiment.nlpalgo.bean.Double2;
import com.sentiment.nlpalgo.bean.Keywords;
import com.sentiment.nlpalgo.method.AlgoHttp;
import com.sentiment.tools.Format;
import com.sentiment.tools.Generator;
import com.sentiment.webcollector.crawler.DeepCrawler;
import com.sentiment.webcollector.model.Links;
import com.sentiment.webcollector.model.Page;

/**
 * 根据关键词 爬取百度新闻搜索引擎结果 保存到CrawlNews Bean对象中
 * 
 * @author 王骏科
 *
 */
/**
 * Crawls Baidu News search-engine results for a keyword and stores each hit
 * as a {@link CrawlNews} bean, retrievable via {@link #getNewsList()}.
 *
 * <p>Depth 1 pages are Baidu search-result listings (metadata is scraped and
 * article links are followed); depth 2 pages are the articles themselves,
 * whose body text is extracted and optionally run through the NLP service.
 *
 * @author 王骏科
 */
public class BaiduNewsCrawler extends DeepCrawler {
	/** Number of news entries Baidu renders per result page. */
	private static final int PN = 10;
	private static final Logger LOG = LoggerFactory.getLogger(BaiduNewsCrawler.class);
	/** Matches Baidu News search-result (depth-1) URLs; compiled once instead of per visit. */
	private static final Pattern SEARCH_PAGE = Pattern.compile("http://news.baidu.com/ns\\?.*word=.*");
	// Collected results; populated during the crawl, read through getNewsList()
	private volatile List<CrawlNews> newsList = new ArrayList<CrawlNews>();
	// Number of result pages to crawl, set by getSearch()
	private int pages;

	static {
		System.out.println(Config.log4jPropertiesPath);
		PropertyConfigurator.configure(Config.log4jPropertiesPath);
	}

	/**
	 * @return the news collected so far (live list, not a copy)
	 */
	public List<CrawlNews> getNewsList() {
		return newsList;
	}

	public BaiduNewsCrawler() {
	}

	@Override
	public Links visitAndGetNextLinks(Page page) {
		String url = page.getUrl();
		Links links = new Links();
		if (SEARCH_PAGE.matcher(url).matches()) {
			// Depth 1: a search-result listing — harvest metadata, follow article links.
			depthOneInfo(page);
			links.addAllFromDocument(page.getDoc(), ".c-title");
			return links;
		}
		try {
			// Depth 2: an article page. Rather than writing a parser per news site,
			// use the generic content-extraction algorithm to pull the article body.
			News n = ContentExtractor.getNewsByHtml(page.getHtml());
			for (CrawlNews cn : newsList) {
				if (!cn.getUrl().equals(page.getUrl())) {
					continue;
				}
				List<String> text = CrawlerUtils.crawlText(n.getContentElement());
				cn.setText(text);
				cn.setDate(new Date());
				if (Config.useNlp) {
					// Real sentiment/keyword analysis via the NLP HTTP interface.
					AlgoHttp algoHttp = new AlgoHttp();
					cn.setSentiment(algoHttp.sentiment(text, AlgoHttp.SENTIMENT_NEWS));
					cn.setKeywords(algoHttp.keywords(text));
				} else {
					// NLP disabled: generate 3 random sentiment/keyword entries as test data.
					List<Double2> sentiments = new ArrayList<Double2>();
					for (int i = 0; i < 3; i++) {
						double rd = Generator.genenrateDouble(0, 1);
						sentiments.add(new Double2(1 - rd, rd));
					}
					cn.setSentiment(sentiments);
					List<Keywords> keywords = new ArrayList<Keywords>();
					for (int i = 0; i < 3; i++) {
						String keyword = Generator.generateKeyword();
						keywords.add(new Keywords(Generator.genenrateDouble(0, 1), keyword));
					}
					cn.setKeywords(keywords);
				}
			}
		} catch (Exception e) {
			// Preserve the stack trace instead of logging only e.toString().
			LOG.error("failed to extract article content from " + page.getUrl(), e);
		}
		return null;
	}

	public static void main(String[] args) {
		BaiduNewsCrawler crawler = new BaiduNewsCrawler();
		crawler.getSearch("双十一", 3);
		List<CrawlNews> list = crawler.getNewsList();
		LOG.info(list.toString());
	}

	/**
	 * Seeds the crawler with Baidu News search URLs for the keyword and starts
	 * a depth-2 crawl.
	 *
	 * @param keyword
	 *            keyword to search for (URL-encoded as UTF-8 before seeding)
	 * @param pages
	 *            number of result pages to crawl
	 */
	public void getSearch(String keyword, int pages) {
		this.pages = pages;
		try {
			keyword = URLEncoder.encode(keyword, "utf-8");
			for (int pn = 0; pn < pages; pn++) {
				// Baidu paginates via the pn query parameter, PN results per page.
				addSeed(ConfigCrawler.baiduCrw + keyword + "&pn=" + pn * PN);
			}
			start(2);
		} catch (Exception e) {
			// Preserve the stack trace instead of logging only e.toString().
			LOG.error("failed to seed/start crawl", e);
		}
	}

	/**
	 * Parses one Baidu search-result listing (depth 1) and appends one
	 * {@link CrawlNews} per entry, skipping titles already collected.
	 */
	private void depthOneInfo(Page page) {
		Document doc = page.getDoc();
		// Baidu numbers result entries with a global element id; locate the
		// first id that is actually present on this page.
		int begin = 0;
		for (; begin < pages; begin++) {
			if (doc.getElementById(String.valueOf(begin * PN + 1)) != null)
				break;
		}
		// A page carries at most PN news entries.
		outer: for (int i = 1; i <= PN; i++) {
			Element ele = doc.getElementById(String.valueOf(begin * PN + i));
			// The last page may hold fewer than PN entries.
			if (ele == null)
				continue;
			// Deduplicate by title: PN is sometimes 10 and sometimes 20, so the
			// same entry can reappear under a different id.
			String title = ele.getElementsByTag("h3").first().text();
			for (CrawlNews e : newsList) {
				// FIX: compare string content with equals(), not reference identity (==).
				if (title.equals(e.getTitle()))
					continue outer;
			}
			CrawlNews news = new CrawlNews();

			news.setTitle(title);
			news.setUrl(ele.getElementsByTag("a").first().absUrl("href"));

			// "c-author" holds "publisher<nbsp>timestamp..."; the first token is
			// the publisher, the remaining tokens form the timestamp.
			String[] publisherAndTime = ele.getElementsByClass("c-author").first().text().split("[\\u00a0\\s]");

			news.setPublisher(publisherAndTime[0]);
			StringBuilder sbd = new StringBuilder();
			for (int j = 1; j < publisherAndTime.length; j++) {
				sbd.append(publisherAndTime[j]).append('_');
			}
			news.setReleaseTime(baiduTimeFormat(sbd.toString()));

			// Reprint count lives in the optional "c-more_link" element.
			// jsoup's getElementsByClass never returns null, so only first() needs checking.
			Element more = ele.getElementsByClass("c-more_link").first();
			if (more != null && more.text() != null) {
				String digits = more.text().replaceAll("[^0-9]", "");
				// Guard against NumberFormatException when the text carries no digits.
				news.setReprint(digits.isEmpty() ? 0 : Integer.parseInt(digits));
			} else {
				news.setReprint(0);
			}
			newsList.add(news);
		}
	}

	/**
	 * Parses Baidu's result timestamp: either an absolute "年/月/日" date or a
	 * relative "...前" ("ago") expression.
	 */
	private Date baiduTimeFormat(String time) {
		if (time.contains("年")) {
			// Absolute year/month/day format.
			return Format.string2Date(time, 5);
		}
		// Relative "X before now" format.
		return Format.beforeTime(time);
	}
}
