package com.xiaotu.spider.tianya.pageprocessor;

import java.util.List;
import java.util.Map;

import org.apache.commons.lang.StringUtils;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Request;
import us.codecraft.webmagic.selector.Html;

import com.xiaotu.common.exception.SpiderException;
import com.xiaotu.common.exception.SpiderExceptionCode;
import com.xiaotu.common.model.TVTianYaModel;
import com.xiaotu.common.util.Constants;
import com.xiaotu.common.util.PageUtils;
import com.xiaotu.common.util.RegexUtils;
import com.xiaotu.common.util.TVProcessUtils;
import com.xiaotu.spider.APageProcessor;

import net.sf.json.JSONObject;

/**
 * Page processor that crawls Tianya (tianya.cn) forum posts about a TV work.
 *
 * <p>Crawling starts from the Tianya BBS search page (ordered by post time),
 * follows every result link, and extracts title / author / post time / click
 * count / reply count from each post page (both desktop and mobile layouts
 * are supported).
 *
 * <p>Originally authored 2017-02-16 by Gao Haijun (高海军).
 */
@Component("TVTianYaPageProcessor")
@Scope("prototype")
public class TVTianYaPageProcessor extends APageProcessor
{
	
	/** Tianya BBS search endpoint used to build the start request. */
	protected static final String SEARCH_URL = "http://search.tianya.cn/bbs";
	
	/**
	 * XPath of the second info span on the "q"-layout post page; it holds the
	 * post time, click count and reply count in one text node, so the same
	 * node is mined three times in {@link #getPageDataFromWeb(Page)}.
	 */
	private static final String Q_INFO_SPAN2_XPATH =
			"//div[@class=\"q-info\"]/span[2]/text()";
	
	/**
	 * Builds the start request: search by the TV keyword, ordered by post
	 * time ({@code s=4}), page 1 ({@code pn=1}).
	 *
	 * @param target JSON describing the TV work to search for
	 * @return the initial search request
	 */
	@Override
	protected Request getStartRequest(JSONObject target)
	{
		// s=4 => order by post time; pn=1 => first result page
		return new Request(SEARCH_URL + "?q="
				+ TVProcessUtils.getTVKeyWord(target) + "&s=4&pn=1");
	}
	
	/** @return the spider task type constant for TV Tianya crawling */
	@Override
	protected String getSpiderType()
	{
		return Constants.SpiderTaskType.TV_TIANYA;
	}
	
	/**
	 * Classifies a fetched page: search-result list vs. Tianya post page.
	 *
	 * @param page the downloaded page
	 * @return {@link Constants.PageDataType#SEARCH_LIST} for search pages,
	 *         {@link Constants.PageDataType#TIANYA} otherwise
	 */
	@Override
	public String getPageDataType(Page page)
	{
		if (page.getUrl().toString().contains(SEARCH_URL))
		{
			return Constants.PageDataType.SEARCH_LIST;
		}
		return Constants.PageDataType.TIANYA;
	}
	
	/**
	 * Dispatches a page to either search-list handling (enqueue post links)
	 * or data extraction, based on {@link #getPageDataType(Page)}.
	 *
	 * @param page the downloaded page
	 */
	@Override
	public void process(Page page)
	{
		// Extras carry the TV context ("tvid", "tvName") through the crawl.
		Map<String, Object> tvInfoMap = page.getRequest().getExtras();
		if (Constants.PageDataType.SEARCH_LIST
				.equals(this.getPageDataType(page)))
		{
			this.searchListProcess(page, tvInfoMap);
		}
		else
		{
			this.dataProcess(page, tvInfoMap);
		}
	}
	
	/**
	 * Extracts post fields from the page and emits a {@link TVTianYaModel}.
	 *
	 * @param page      the post page
	 * @param tvInfoMap crawl context; "tvid" and "tvName" are read here
	 */
	protected void dataProcess(Page page, Map<String, Object> tvInfoMap)
	{
		// data layout: [0]=title, [1]=user, [2]=time, [3]=click, [4]=reply
		String[] data = this.getPageData(page);
		
		// NOTE(review): the constructor receives (title, user, click, reply,
		// time) in that order — assumed to match TVTianYaModel's signature;
		// verify against the model class before reordering.
		page.putField(TVTianYaModel.class.getSimpleName(),
				new TVTianYaModel(tvInfoMap.get("tvid") + "",
						tvInfoMap.get("tvName") + "", data[0], data[1], data[3],
						data[4], data[2]));
	}
	
	/**
	 * Routes extraction to the mobile or desktop parser by URL prefix.
	 *
	 * @param page the post page
	 * @return {@code [title, user, time, click, reply]}
	 */
	protected String[] getPageData(Page page)
	{
		if (page.getUrl().toString().contains("http://bbs.tianya.cn/m/"))
		{
			return this.getPageDataFromMWeb(page);
		}
		return this.getPageDataFromWeb(page);
	}
	
	/**
	 * Parses a desktop post page. Each field is first tried against the
	 * "atl"-layout XPaths; when blank, falls back to the "q"-layout, where
	 * time/click/reply must be regex-mined out of a single combined span.
	 *
	 * @param page the desktop post page
	 * @return {@code [title, user, time, click, reply]}
	 * @throws SpiderException when any field ends up blank
	 */
	private String[] getPageDataFromWeb(Page page)
	{
		Html html = page.getHtml();
		
		String title = html.xpath("//h1[@class=\"atl-title\"]/span/span/text()")
				.get();
		title = StringUtils.isNotBlank(title) ? title
				: html.xpath("//div[@class=\"q-title\"]/h1/span/text()").get();
		
		String user = html.xpath("//div[@class=\"atl-info\"]//a[1]/text()")
				.get();
		user = StringUtils.isNotBlank(user) ? user
				: html.xpath("//div[@class=\"q-info\"]/span/a/text()").get();
		
		String time = html.xpath("//div[@class=\"atl-info\"]/span[2]/text()")
				.get();
		time = StringUtils.isNotBlank(time) ? time
				: RegexUtils.regexSearch(
						"(\\d{4})-(\\d{2})-(\\d{2}) (\\d{2}):(\\d{2}):(\\d{2})",
						html.xpath(Q_INFO_SPAN2_XPATH).get());
		
		String click = html.xpath("//div[@class=\"atl-info\"]/span[3]/text()")
				.get();
		click = StringUtils.isNotBlank(click) ? click
				: RegexUtils.regexSearch(RegexUtils.REGEX_INTEGER,
						RegexUtils.regexSearch("点击：" + RegexUtils.REGEX_INTEGER,
								html.xpath(Q_INFO_SPAN2_XPATH).get()));
		
		String reply = html.xpath("//div[@class=\"atl-info\"]/span[4]/text()")
				.get();
		reply = StringUtils.isNotBlank(reply) ? reply
				: RegexUtils.regexSearch(RegexUtils.REGEX_INTEGER,
						RegexUtils.regexSearch("回复：" + RegexUtils.REGEX_INTEGER,
								html.xpath(Q_INFO_SPAN2_XPATH).get()));
		
		this.validateData(title, user, time, click, reply);
		
		// Normalize: strip the "时间：" ("time:") label and keep bare numbers.
		return new String[] { title, user,
				time.trim().replace("时间：", StringUtils.EMPTY),
				RegexUtils.regexSearch(RegexUtils.REGEX_INTEGER, click),
				RegexUtils.regexSearch(RegexUtils.REGEX_INTEGER, reply) };
	}
	
	/**
	 * Parses a mobile ({@code bbs.tianya.cn/m/}) post page.
	 *
	 * @param page the mobile post page
	 * @return {@code [title, user, time, click, reply]}
	 * @throws SpiderException when any field ends up blank
	 */
	private String[] getPageDataFromMWeb(Page page)
	{
		Html html = page.getHtml();
		
		String title = html.xpath("//div[@class=\"title\"]/h1/text()").get();
		String user = html.xpath("//h4[@class=\"author\"]/text()").get();
		String time = html
				.xpath("//div[@class=\"content\"]/div/a[@class=\"info\"]/p/text()")
				.get();
		String click = html.xpath("//div[@class=\"title\"]/div//i[1]/text()")
				.get();
		String reply = html.xpath("//div[@class=\"title\"]/div//i[2]/text()")
				.get();
		
		this.validateData(title, user, time, click, reply);
		
		// Mobile pages omit seconds; append ":00" to match the desktop format.
		return new String[] { title, user, time + ":00",
				RegexUtils.regexSearch(RegexUtils.REGEX_INTEGER, click),
				RegexUtils.regexSearch(RegexUtils.REGEX_INTEGER, reply) };
	}
	
	/**
	 * Ensures every extracted field is non-blank.
	 *
	 * @throws SpiderException with {@code DataInvalideException} when any
	 *                         argument is blank
	 */
	private void validateData(String title, String user, String time,
			String click, String reply)
	{
		if (StringUtils.isBlank(title) || StringUtils.isBlank(user)
				|| StringUtils.isBlank(time) || StringUtils.isBlank(click)
				|| StringUtils.isBlank(reply))
		{
			throw new SpiderException(
					SpiderExceptionCode.DataInvalideException);
		}
	}
	
	/**
	 * Handles a search-result page: enqueues every result link, then (on page
	 * 1 only) enqueues page 2 when a pager exists, and mirrors the whole
	 * search once more ordered by reply time ({@code s=6}).
	 *
	 * @param page      the search-result page
	 * @param tvInfoMap crawl context forwarded to every enqueued request
	 * @throws SpiderException when the result list is empty
	 */
	private void searchListProcess(Page page, Map<String, Object> tvInfoMap)
	{
		Html html = page.getHtml();
		List<String> linkList = html
				.xpath("//div[@class='searchListOne']/ul/li/div/h3/a/@href")
				.all();
		
		if (linkList == null || linkList.isEmpty())
		{
			throw new SpiderException(
					SpiderExceptionCode.DataInvalideException);
		}
		
		for (String link : linkList)
		{
			PageUtils.addTargetRequest(page, link, tvInfoMap);
		}
		
		String url = page.getUrl().toString();
		if (url.contains("&pn=1"))// currently on result page 1
		{
			// A pager div means at least a second page exists.
			linkList = html.xpath("//div[@class='long-pages']/a").all();
			if (linkList != null && !linkList.isEmpty())
			{
				PageUtils.addTargetRequest(page, url.replace("&pn=1", "&pn=2"),
						tvInfoMap);
			}
		}
		
		if (url.contains("&s=4"))// ordered by post time
		{
			// Also crawl the same search ordered by reply time (s=6).
			PageUtils.addTargetRequest(page, url.replace("&s=4", "&s=6"),
					tvInfoMap);
		}
	}
	
}
