package com.goldgov.utils;

import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Date;
import java.util.List;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Document.OutputSettings;
import org.jsoup.safety.Whitelist;
import com.alibaba.fastjson.JSON;
import com.goldgov.information.service.Article;
import com.goldgov.information.service.ArticleRules;
import com.goldgov.information.service.CrawlerTempletEnum;
import com.goldgov.information.service.CrawlerTemplets;
import com.goldgov.information.service.impl.ArticleRulesServiceImpl;
import com.goldgov.information.service.impl.ArticleServiceImpl;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Request;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;


@Deprecated
public class CrawlerUtils implements PageProcessor {

	protected final Log logger = LogFactory.getLog(this.getClass());

	public CrawlerUtils() {}

	// Site configuration for the crawl: retry count and fetch interval.
	private Site site = Site.me().setRetryTimes(3).setSleepTime(10);
	// Depth of the page currently being processed.
	// NOTE(review): the spider runs this processor on 10 threads, so this
	// mutable instance field is racy — each method should read the request's
	// "_level" extra directly instead of caching it here. TODO: confirm and refactor.
	private int level = 1;
	private static int count;      // number of successfully crawled articles
	private static String rulesID; // ID of the rule set currently being crawled

	/**
	 * Processes one downloaded page: extracts the article fields according to
	 * the rules carried in the request extras, persists the article when the
	 * page is at the configured depth, and enqueues follow-up article links
	 * and paging (list) links.
	 *
	 * @param page the page fetched by WebMagic
	 */
	@Override
	public void process(Page page) {
		ArticleRules ar = JSON.parseObject(page.getRequest().getExtra("_rules").toString(), ArticleRules.class);
		Article art = new Article();
		this.level = (int) page.getRequest().getExtra("_level");
		// Populate the article bean from the page content.
		toBean(ar, art, page);
		// Persist the bean when this page is at the target depth.
		saveBean(ar, art, page);
		// Enqueue links to individual articles found on this page.
		getArticleLink(ar, art, page);
		// Enqueue links to further article-list (paging) pages.
		getArticleListLink(ar, page);
	}

	/**
	 * Starts a crawl for the given rule set. When a custom template is
	 * registered for the rule ID it is used instead; otherwise the rules are
	 * loaded from the database and a 10-thread spider is started.
	 *
	 * @param rulesID identifier of the crawl rule set
	 */
	public CrawlerUtils(String rulesID) {
		try {
			// Use the custom template when one is configured for this rule ID.
			CrawlerTempletEnum ctEnum = CrawlerTempletEnum.val(rulesID);
			if (null != ctEnum) {
				new CrawlerTemplets(ctEnum);
				return;
			}
			// Reset the success counter for this run and remember the rule ID.
			setCount(0);
			setRulesID(rulesID);
			// No custom template configured: read the rules from the database.
			ArticleRules ar = BeanUtil.getBean(ArticleRulesServiceImpl.class).getArticleRulesByID(rulesID);
			// Seed the spider with the rule's start URL at depth 1.
			Request requests = new Request();
			requests.putExtra("_level", 1);
			requests.putExtra("_rules", JSON.toJSONString(ar));
			requests.setUrl(ar.getUrlAddress());
			Spider.create(new CrawlerUtils())
						.addRequest(requests)
						.isTest(false)
						// crawl with 10 worker threads
						.thread(10)
						// start the spider
						.start();
		} catch (Exception e) {
			logger.error("===========>爬虫问题", e);
		}
	}

	/**
	 * Extracts one field value from the page using a rule of the form
	 * "level,xpath[,regex]": applies the xpath, then the optional
	 * supplementary regex when present.
	 */
	private String extractField(String rule, Page page) {
		String xpath = getRuleValue(rule);
		String supply = getRuleValueSupply(rule);
		if (null == supply) {
			return page.getHtml().xpath(xpath).toString();
		}
		return page.getHtml().xpath(xpath).regex(supply).toString();
	}

	/**
	 * Populates the article bean from the page, field by field. Each rule is
	 * only applied when its configured depth matches the current page depth.
	 */
	void toBean(ArticleRules ar, Article art, Page page) {
		if (judgeLevel(ar.getTitleRules())) {
			art.setArticleTitle(extractField(ar.getTitleRules(), page));
		}
		if (judgeLevel(ar.getDateRules())) {
			art.setPublishDateText(extractField(ar.getDateRules(), page));
		}
		if (judgeLevel(ar.getSourceRules())) {
			art.setArticleSource(extractField(ar.getSourceRules(), page));
		}
		if (judgeLevel(ar.getAuthorRules())) {
			art.setArticleAuthor(extractField(ar.getAuthorRules(), page));
		}
		if (judgeLevel(ar.getContentRules())) {
			art.setArticleContent(extractField(ar.getContentRules(), page));
		}
	}

	/**
	 * Persists the article when the current page is at the rule's target
	 * depth, filling in bookkeeping fields first and bumping the shared
	 * success counter.
	 */
	void saveBean(ArticleRules ar, Article art, Page page) {
		if (ar.getLevel().intValue() == (int) page.getRequest().getExtra("_level")) {
			art.setRulesID(ar.getRulesID());
			art.setLastModifyTime(new Date());
			art.setArticleUrl(page.getUrl().get());
			art.setActiveState(Article.ACTIVE_STATE_YES);
			art.setPublishState(Article.PUBLISH_STATE_NO);
			// NOTE(review): the next two calls are self-assignments (no-ops);
			// they presumably meant to copy the category from the rules (ar) —
			// TODO confirm against ArticleRules and fix.
			art.setCmsCategoryID(art.getCmsCategoryID());
			art.setCmsCategoryName(art.getCmsCategoryName());
			art.setAuditState(ArticleRules.AUDIT_STATE_YES == ar.getIsAudit() ? Article.AUDIT_STATE_YES : Article.AUDIT_STATE_NO);
			// Save the article.
			BeanUtil.getBean(ArticleServiceImpl.class).addArticle(art);
			// Bump the shared counter. Lock on the class, not "this": the
			// counter is static, so a per-instance monitor would not guard it
			// if more than one processor instance existed.
			synchronized (CrawlerUtils.class) {
				CrawlerUtils.count++;
			}
		}
	}

	/**
	 * Enqueues a request (at depth +1) for every article link on this page
	 * that is not already stored.
	 */
	void getArticleLink(ArticleRules ar, Article art, Page page) {
		if (null != ar.getUrlRules()) {
			List<String> links = page.getHtml().xpath(ar.getUrlRules()).all(); // article links on this page
			for (String link : links) {
				link = getAbsoluteURL(ar.getUrlAddress(), link);
				// Skip links whose article already exists. (Previously this
				// was "return", which silently dropped every remaining link on
				// the page as soon as one known article was encountered.)
				if (1 <= BeanUtil.getBean(ArticleServiceImpl.class).existArticleByUrl(link)) {
					continue;
				}
				Request requests = new Request();
				requests.putExtra("_level", (int) page.getRequest().getExtra("_level") + 1);
				requests.putExtra("_rules", JSON.toJSONString(ar));
				requests.setUrl(link);
				page.addTargetRequest(requests);
			}
		}
	}

	/**
	 * Enqueues a depth-1 request for every paging (article-list) link on this
	 * page, marking the current page as skipped so it is not processed twice.
	 */
	void getArticleListLink(ArticleRules ar, Page page) {
		if (null != ar.getPagingRules()) {
			List<String> waitRulList = page.getHtml().xpath(ar.getPagingRules()).all();
			for (String s : waitRulList) {
				Request requests = new Request();
				requests.putExtra("_level", 1);
				requests.putExtra("_rules", JSON.toJSONString(ar));
				requests.setUrl(getAbsoluteURL(page.getUrl().toString(), s));
				// Skip pages that have already been added.
				page.setSkip(true);
				page.addTargetRequest(requests);
			}
		}
	}

	/**
	 * Strips HTML markup from the given fragment, preserving line breaks for
	 * {@code <br>} and {@code <p>} elements, and unescapes HTML entities.
	 *
	 * @param html the HTML fragment; {@code null} yields ""
	 * @return the plain-text content
	 */
	public static String toPlainText(final String html) {
		if (html == null) {
			return "";
		}
		final Document document = Jsoup.parse(html);
		final OutputSettings outputSettings = new Document.OutputSettings().prettyPrint(false);
		document.outputSettings(outputSettings);
		// Encode line breaks as a placeholder that survives the clean() pass.
		document.select("br").append("\\n");
		document.select("p").prepend("\\n");
		document.select("p").append("\\n");
		final String newHtml = document.html().replaceAll("\\\\n", "\n");
		final String plainText = Jsoup.clean(newHtml, "", Whitelist.none(), outputSettings);
		return StringEscapeUtils.unescapeHtml(plainText.trim());
	}

	/**
	 * Resolves a relative path against a base URI and returns the absolute URL.
	 *
	 * @param baseURI      URI of the page the link was found on
	 * @param relativePath the (possibly relative) link target
	 * @return the absolute URL string, or {@code null} when resolution fails
	 */
	public static String getAbsoluteURL(String baseURI, String relativePath) {
		try {
			URI base = new URI(baseURI);                // URI of the containing page
			URI abs = base.resolve(relativePath);       // resolve the relative link
			return abs.toURL().toString();
		} catch (Exception e) {
			// The previous version returned from a finally block, which
			// silently discarded every exception (and needed
			// @SuppressWarnings("finally")). Keep the "null on failure"
			// contract, but log the cause instead of swallowing it.
			LogFactory.getLog(CrawlerUtils.class).error("failed to resolve URL: base=" + baseURI + ", relative=" + relativePath, e);
			return null;
		}
	}

	@Override
	public Site getSite() {
		return site;
	}

	public int getLevel() {
		return level;
	}

	public void setLevel(int level) {
		this.level = level;
	}

	/**
	 * Returns {@code true} when the rule string is non-empty and its depth
	 * component (first comma-separated token) matches the current level.
	 */
	public boolean judgeLevel(String str) {
		if (null == str || str.isEmpty()) {
			return false;
		}
		return getLevel() == Integer.parseInt(str.split(",")[0]);
	}

	/** Returns the xpath component (second token) of a "level,xpath[,regex]" rule. */
	public String getRuleValue(String str) {
		return str.split(",")[1];
	}

	/** Returns the optional regex component (third token) of a rule, or {@code null} when absent. */
	public String getRuleValueSupply(String str) {
		String[] parts = str.split(",");
		return parts.length < 3 ? null : parts[2];
	}

	public static int getCount() {
		return count;
	}

	public static void setCount(int count) {
		CrawlerUtils.count = count;
	}

	public static String getRulesID() {
		return rulesID;
	}

	public static void setRulesID(String rulesID) {
		CrawlerUtils.rulesID = rulesID;
	}
}