package com.xiaotu.spider.project.pageprocessor;

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import net.sf.json.JSONObject;

import org.apache.commons.lang3.StringUtils;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Request;
import us.codecraft.webmagic.selector.Html;

import com.xiaotu.common.exception.SpiderException;
import com.xiaotu.common.exception.SpiderExceptionCode;
import com.xiaotu.common.util.FileUtil;
import com.xiaotu.common.util.PageUtils;
import com.xiaotu.common.util.RegexUtils;
import com.xiaotu.common.util.SepratorUtil;
import com.xiaotu.spider.APageProcessor;
import com.xiaotu.spider.downloader.SpiderHttpClientDownloader;

/**
 * Baidu headline-news crawler page processor.
 *
 * <p>Builds Baidu news search-list URLs from a keyword and page number, parses
 * each result entry (title / source / time / summary) and appends one
 * pipe-separated line per entry to a cache file, then schedules the next list
 * page until {@code lastPageNo} is reached.
 *
 * @类名 NewsBaiDuPageProcessor
 * @日期 2017年6月7日
 * @作者 王艳龙
 * @功能 百度头条新闻数据爬取
 */
@Component("NewsBaiDuPageProcessor")
@Scope("prototype")
public class NewsBaiDuPageProcessor extends APageProcessor {

	/** Placeholder in the URL template replaced by the result offset (pageNo * 20). */
	private static final String ESCAPE_PAGENO = "#pageNo#";
	/** Placeholder in the URL template replaced by the search keyword. */
	private static final String ESCAPE_WORD = "#word#";

	/** Baidu news search-list URL template; rn=20 means 20 results per page. */
	private static final String BAIDU_NEWS_LIST_URL = "http://news.baidu.com/ns?word="
	+ESCAPE_WORD+"&pn="+ ESCAPE_PAGENO + 
	"&cl=2&ct=1&tn=news&rn=20&ie=utf-8&bt=0&et=0&rsv_page=1";

	// Field keys used both as map keys and as the fixed output-column order.
	private static final String NEW_BAIDU_TITLE = "标题";
	private static final String NEW_BAIDU_SOURCE = "来源";
	private static final String NEW_BAIDU_TIME = "时间";
	private static final String NEW_BAIDU_SUMMARY = "简介";
	/** Absolute path of the cache file results are appended to. */
	private String fileName;

	/** Output column order: title, source, time, summary. */
	private static final List<String> LABELS_ALL = Arrays.asList("标题", "来源","时间", "简介");

	/** Start page number (as supplied by the caller). */
	private String pageNo;
	/** Last page number to crawl (inclusive bound); may be absent. */
	private String lastPageNo;

	/**
	 * Initializes run parameters.
	 *
	 * @param map expects optional "pageNo" and "lastPageNo", and a mandatory
	 *            "fileName" (cache-file key); a missing "fileName" raises
	 *            {@link SpiderException} with
	 *            {@code PageProcessorParameterErrorException}
	 */
	@Override
	public void initRunParams(Map<String, Object> map)
	{
		if (map.get("pageNo") != null)
			pageNo = map.get("pageNo") + StringUtils.EMPTY;
		if (map.get("lastPageNo") != null)
			lastPageNo = map.get("lastPageNo") + StringUtils.EMPTY;
		if (map.get("fileName") == null)
			throw new SpiderException(
					SpiderExceptionCode.PageProcessorParameterErrorException);
		fileName = FileUtil
				.getCacheFilePath(map.get("fileName") + StringUtils.EMPTY);
	}

	/**
	 * Builds the first request of a crawl from the target descriptor.
	 *
	 * @param target JSON descriptor containing at least a "url" from which the
	 *               search keyword is extracted
	 * @return the start {@link Request} pointing at the first list page
	 */
	@Override
	protected Request getStartRequest(JSONObject target) {
		target.put(ESCAPE_PAGENO, pageNo);
		target.put(ESCAPE_WORD, this.getNewsWord(target.getString("url")));
		target.putAll(this.getOtherTarget());
		return new Request(this.getUrl(target));
	}

	/**
	 * Parses one search-result list page: writes one pipe-separated line per
	 * result div, then schedules the next page if within {@code lastPageNo}.
	 *
	 * @param page the downloaded list page
	 * @throws SpiderException with {@code DataInvalideException} when the page
	 *                         contains no result divs
	 */
	@Override
	public void process(Page page) {
		List<String> divList = page.getHtml()
				.xpath("//div[@class='result']").all();
		if (divList == null || divList.isEmpty())
			throw new SpiderException(SpiderExceptionCode.DataInvalideException);
		Map<String, String> map = new HashMap<String, String>();
		for (String divStr : divList) {
			// Reset per result entry; otherwise a missing field would silently
			// inherit the value parsed from the previous entry.
			map.clear();
			// Strip Baidu's keyword-highlight tags before parsing.
			Html divObj = new Html(divStr.replaceAll("<em>", "").replaceAll("</em>", ""));
			for (String key : LABELS_ALL) {
				this.parseBaseinfo(divObj, map, key);
			}
			StringBuilder buffer = new StringBuilder();
			for (String key : LABELS_ALL) {
				buffer.append(SepratorUtil.SEP_PIPE)
						.append(map.containsKey(key) ? map.get(key)
								: StringUtils.EMPTY);
			}
			FileUtil.writeFile(fileName, buffer.toString()
					+ SepratorUtil.LINE_SEPRATOR);
		}

		// Current page number as rendered in the pagination bar.
		String next = page.getHtml()
				.xpath("//p[@id='page']/strong/span[@class='pc']/text()")
				.get();
		// "n"-class anchors: previous/next page links. When both exist the
		// second one is the "next page" link.
		List<String> lastPage = page.getHtml()
				.xpath("//p[@id='page']/a[@class='n']/@href")
				.all();
		String lastPages;
		if (lastPage != null && lastPage.size() > 1) {
			lastPages = lastPage.get(1);
		} else {
			lastPages = page.getHtml()
					.xpath("//p[@id='page']/a[@class='n']/@href").get();
		}

		// On the final page there is no next-page link; stop instead of NPE-ing
		// on substring(). Also guard against an unexpected href shape.
		if (StringUtils.isEmpty(lastPages)
				|| !lastPages.contains("&pn=") || !lastPages.contains("&cl="))
			return;
		// Result offset of the next page, e.g. "&pn=40&cl=" -> "40".
		String lastNo = lastPages.substring(lastPages.indexOf("&pn=") + 4,
				lastPages.indexOf("&cl="));
		if (StringUtils.isNotEmpty(next))
		{
			next = RegexUtils.regexSearch(RegexUtils.REGEX_INTEGER, next);
			if (StringUtils.isNotEmpty(next))
			{
				// lastPageNo is optional; without it we cannot bound the crawl,
				// so do not schedule further pages.
				if (StringUtils.isNotEmpty(lastPageNo)
						&& Integer.parseInt(lastNo) <= Integer.parseInt(lastPageNo) * 20) {
					PageUtils.addTargetRequest(page,
							lastPages,
							page.getRequest().getExtras());
				}
			}
		}
	}

	/**
	 * Parses one field of a result div into the cache map.
	 *
	 * <p>Leaves the map untouched when the field is absent; every stored value
	 * has commas stripped (title/summary) and a trailing comma separator
	 * appended.
	 *
	 * @param html one result div (highlight tags already removed)
	 * @param map  output cache keyed by the field label
	 * @param key  which field to parse (one of {@link #LABELS_ALL})
	 */
	private void parseBaseinfo(Html html, Map<String, String> map, String key) {
		StringBuilder buffer = new StringBuilder();
		if (key.contains(NEW_BAIDU_TITLE)) {
			String title = html.xpath("//div[@class='result']/h3/a/text()")
					.get();
			// Null-check BEFORE dereferencing; the xpath returns null when the
			// anchor is missing.
			if (title == null || title.isEmpty())
				return;
			title = title.replaceAll(",", "");
			map.put(NEW_BAIDU_TITLE,
					buffer.append(title).append(SepratorUtil.SEP_COMMA_EN)
							.toString());

		} else if (key.contains(NEW_BAIDU_SOURCE)) {
			String source_time = html.xpath("//p[@class='c-author']/text()")
					.get();
			if (source_time == null || source_time.isEmpty())
				return;
			// Source and time are separated by a double space.
			String[] source_date = source_time.split("  ");
			if (source_date.length < 1)
				return;

			String source = source_date[0];
			map.put(NEW_BAIDU_SOURCE,
					buffer.append(source).append(SepratorUtil.SEP_COMMA_EN)
					.toString());

		} else if (key.contains(NEW_BAIDU_TIME)) {
			String source_time = html.xpath("//p[@class='c-author']/text()")
					.get();
			if (source_time == null || source_time.isEmpty())
				return;
			String[] source_date = source_time.split("  ");
			// The time is the SECOND token; require at least two to avoid
			// ArrayIndexOutOfBoundsException.
			if (source_date.length < 2)
				return;

			String time = source_date[1];
			map.put(NEW_BAIDU_TIME,
					buffer.append(time).append(SepratorUtil.SEP_COMMA_EN)
					.toString());

		} else if (key.contains(NEW_BAIDU_SUMMARY)) {
			// Two possible summary layouts: plain row, or thumbnail layout.
			String summary = html.xpath("//div[@class='c-summary c-row ']/text()")
					.get();
			if (summary == null || summary.isEmpty()) {
				summary = html.xpath("//div[@class='c-span18 c-span-last']/text()")
						.get();

				if (summary == null || summary.isEmpty())
					return;
			}

			map.put(NEW_BAIDU_SUMMARY,
					buffer.append(summary.replaceAll(",", "")).append(SepratorUtil.SEP_COMMA_EN)
							.toString());
		}
	}

	/**
	 * Extracts the search keyword from a Baidu news URL.
	 *
	 * <p>The keyword lies between {@code word=} and the next {@code &pn} or
	 * {@code &sr} parameter.
	 *
	 * @param url the source URL
	 * @return the keyword
	 * @throws SpiderException with {@code DataInvalideException} when the URL
	 *                         does not contain a parseable keyword
	 */
	private String getNewsWord(String url){
		int start = url.indexOf("word=");
		// End marker depends on the URL variant; guard against malformed URLs
		// instead of letting substring() throw StringIndexOutOfBoundsException.
		int end = url.contains("&pn") ? url.indexOf("&pn") : url.indexOf("&sr");
		if (start < 0 || end < 0 || end <= start + 5)
			throw new SpiderException(SpiderExceptionCode.DataInvalideException,
					url);
		String word = url.substring(start + 5, end);
		if (StringUtils.isEmpty(word))
			throw new SpiderException(SpiderExceptionCode.DataInvalideException,
					url);
		return word;
	}

	/**
	 * Builds the list-page URL for the given target.
	 *
	 * <p>Also stores a Referer header hint in the target for the downloader.
	 *
	 * @param target must carry the page number and keyword under the escape keys
	 * @return the concrete list-page URL (pn = pageNo * 20, 20 results per page)
	 */
	private String getUrl(JSONObject target)
	{
		int pageNo = target.getInt(ESCAPE_PAGENO);
		String word = target.getString(ESCAPE_WORD);
		String pageNoSuf = StringUtils.EMPTY;
		target.put(SpiderHttpClientDownloader.HEADER_REFERER,
				"http://news.baidu.com/ns?/" 
						+ "/word/" + pageNoSuf);
		// pn is a result OFFSET, not a page index, hence pageNo * 20.
		String url = BAIDU_NEWS_LIST_URL
				.replaceAll(ESCAPE_PAGENO, pageNo*20 + StringUtils.EMPTY)
				.replaceAll(ESCAPE_WORD, word + StringUtils.EMPTY);
		return url;
	}

	@Override
	protected String getSpiderType() {
		return "NewsBaiDu";
	}

	@Override
	public String getPageDataType(Page page) {
		return "NewsBaiDu";
	}

}