package com.wzgl.core.webmagic.processor;

import java.io.Serializable;
import java.util.Date;
import java.util.List;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;

import com.jfinal.kit.StrKit;
import com.jfinal.plugin.redis.Redis;
import com.wzgl.core.CacheConsts;
import com.wzgl.core.model.Tgrid;
import com.wzgl.core.vo.MeiWenVo;
import com.wzgl.core.webmagic.MagicUtils;
import com.wzgl.core.webmagic.pipeline.TgridsPipeLine;


/**
 * @author code4crafter@gmail.com <br>
 * @since 0.6.0
 */
/**
 * Crawls article lists and detail pages from meiwen.59xihuan.cn.
 *
 * <p>List pages: each post's title/url/time is stored as a {@link Tgrid} in the
 * Redis cache keyed by its detail URL, and the detail URL is queued for crawling.
 * Detail pages: the cover image is downloaded, the body HTML extracted, and the
 * cached {@link Tgrid} updated. A human-readable crawl log is appended to a
 * second Redis cache. Runnable standalone ({@link #main}) or as a Quartz job.
 *
 * <p>NOTE(review): mutable instance state ({@code size}, {@code t}) is not
 * thread-safe — keep the spider at 1 thread, as {@link #execute} does.
 *
 * @author code4crafter@gmail.com
 * @since 0.6.0
 */
public class MeiwenPageProcessor extends MeiWenVo implements PageProcessor,Serializable,Job {

	private static final long serialVersionUID = 1L;

	/**
	 * The site serves gzip by default; when it does not, the Accept-Encoding
	 * header must be overridden, hence addHeader below.
	 */
    private Site site = Site.me().setRetryTimes(1).setSleepTime(1000).addHeader("Accept-Encoding", "/");
    // Image URL prefix (site root); also used as the Redis key for the crawl log.
    String pic_ = "http://meiwen.59xihuan.cn";
    static Spider s ;
    // 1-based counter of detail pages processed (used only in log messages).
    int size = 1;
    Tgrid t = null;
    // Retained for binary/source compatibility; no longer used for log flushing
    // (the old code re-appended the whole buffer on every flush, duplicating entries).
    StringBuffer sb = new StringBuffer();

    @Override
    public void process(Page page) {
    	// Detail URLs live under /meiwen; anything else is treated as a list page.
    	if (page.getUrl().regex("http://meiwen.59xihuan.cn/meiwen").match()) {
    		processDetail(page);
    	} else {
    		processList(page);
    	}
    }

    /**
     * Enriches the cached {@link Tgrid} for a detail page: downloads the cover
     * image, rewrites its src in the article HTML, extracts the body between the
     * first {@code <p>} and the last {@code </p>}, and writes the entry back.
     * Skips pages the list phase never cached (guards the old NPE).
     */
    private void processDetail(Page page) {
    	Tgrid cached = Redis.use(CacheConsts.CACHE_MAGIC_GIRD_NAME).get(page.getUrl());
    	if (cached == null) {
    		// Not seeded by the list phase — nothing to update.
    		return;
    	}
    	String cont = "";
    	String coverUrl = null;
    	String picText = page.getHtml().xpath("//div[@class='pic_text0']").toString();
    	String picHtml = page.getHtml().xpath("//div[@class='pic_text0']/p/img").get();
    	if (StrKit.notBlank(picHtml) && picText != null) {
    		Elements imgs = Jsoup.parse(picHtml).getElementsByTag("img");
    		if (!imgs.isEmpty()) {
    			String src = imgs.get(0).attr("src");
    			// Download a local copy of the cover image; src is site-relative.
    			coverUrl = MagicUtils.downloadPic(pic_ + src);
    			if (StrKit.notBlank(coverUrl)) {
    				picText = picText.replaceFirst(src, coverUrl);
    				int start = picText.indexOf("<p>");
    				int end = picText.lastIndexOf("</p>");
    				if (start >= 0 && end >= start) {
    					// +4 keeps the closing </p> tag in the extracted body.
    					cont = picText.substring(start, end + 4);
    				}
    			}
    		}
    	}
    	cached.setFhtml(cont);
    	cached.setFcoverImg(coverUrl);
    	Redis.use(CacheConsts.CACHE_MAGIC_GIRD_NAME).set(page.getUrl(), cached);
    	appendLog("读取第" + size + "详情................." + "\r");
    	size++;
    }

    /**
     * Parses every post on a list page, caches new entries in Redis keyed by
     * their detail URL, logs them, and queues the detail URLs for crawling.
     * Already-cached URLs are skipped (dedup; currently only page one is crawled).
     */
    private void processList(Page page) {
    	List<String> posts = page.getHtml()
    			.xpath("//div[@class='main']/div[@class='mLeft']/div[@class='post']").all();
    	int num = 1;
    	for (String li : posts) {
    		Document doc = Jsoup.parse(li);
    		// Detail page URL and title.
    		String href = doc.getElementsByClass("detail1").select("a").attr("href");
    		String title = doc.getElementsByClass("detail1").select("a").attr("title");
    		// Publication time shown on the list page.
    		String time = doc.getElementsByClass("time").text();
    		// Skip URLs already crawled.
    		Object o = Redis.use(CacheConsts.CACHE_MAGIC_GIRD_NAME).get(href);
    		if (o != null) {
    			continue;
    		}
    		t = new Tgrid();
    		t.setFdate(new Date());
    		t.setFtitle(title);
    		t.setFaddress(href);
    		t.setFgetDate(time);
    		// Shared metadata inherited from MeiWenVo.
    		t.setFdescribe(getFpage());
    		t.setFtype(getFtype());
    		t.setFmzgicId(getFmzgicId());
    		// Cache the list data first; the detail pass fills in the body HTML.
    		Redis.use(CacheConsts.CACHE_MAGIC_GIRD_NAME).set(href, t);
    		// Crawl log.
    		appendLog("第" + num + "文章开始读取----------------->\r"
    				+ "第" + num + "标题:" + title + "\r"
    				+ "第" + num + "文章详情URL:" + href + "\r"
    				+ "第" + num + "文章时间:" + time + "\r"
    				+ "第" + num + "文章开始结束----------------->" + "\r");
    		// The list row itself is not pipelined; only enriched details are.
    		page.setSkip(true);
    		// Queue the article body for crawling.
    		page.addTargetRequest(href);
    		num++;
    	}
    }

    /**
     * Appends one fragment to the crawl log in Redis.
     *
     * <p>Fixes two defects of the old inline code: the first write no longer
     * produces a "null" prefix, and only the NEW fragment is appended (the old
     * code flushed the whole ever-growing StringBuffer each time, duplicating
     * every previous entry and growing the log quadratically).
     */
    private void appendLog(String fragment) {
    	String log = Redis.use(CacheConsts.CACHE_LOG_MAGIC_NAME).get(pic_);
    	log = (log == null ? "" : log) + fragment;
    	Redis.use(CacheConsts.CACHE_LOG_MAGIC_NAME).set(pic_, log);
    }

    @Override
    public Site getSite() {
        return site;
    }

    public static void main(String[] args) {
    	/**
    	 * Standalone run using the Redis cache mechanism.
    	 * Start URL and thread count come from MeiWenVo.
    	 */
        s = Spider.create(new MeiwenPageProcessor()).addUrl(getFurl()).thread(getFthread());
    	s.addPipeline(new TgridsPipeLine());
    	s.run();
    }

	@Override
	public void execute(JobExecutionContext context) throws JobExecutionException {
		// Quartz entry point: single-threaded spider started asynchronously
		// (start(), not run(), so the scheduler thread is not blocked).
		s = Spider.create(new MeiwenPageProcessor()).addUrl("http://meiwen.59xihuan.cn").thread(1);
    	s.addPipeline(new TgridsPipeLine());
    	s.start();
	}
}