package com.wzgl.core.webmagic.processor;

import java.io.Serializable;
import java.util.Date;
import java.util.List;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;

import com.jfinal.plugin.redis.Redis;
import com.wzgl.core.CacheConsts;
import com.wzgl.core.model.TfinishUrl;
import com.wzgl.core.model.Tgrid;
import com.wzgl.core.server.FinishurlService;
import com.wzgl.core.utils.WZGLUtils;
import com.wzgl.core.vo.GameSkyVo;
import com.wzgl.core.webmagic.pipeline.TgridsPipeLine;
import com.wzgl.core.webmagic.utils.HrefUtils;
import com.wzgl.core.webmagic.utils.PicUtiles;

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;


/**
 * Crawler for yxdown.com (游迅网) news: reads one list page per day,
 * roughly 40 articles per run.
 * @author Administrator
 */
public class YxdownNewsProcessor extends GameSkyVo implements PageProcessor,Serializable,Job{
	private static final long serialVersionUID = 1L;

	/** Service used to persist URLs that have already been crawled. */
	private static FinishurlService me = FinishurlService.me;
	/** Spider shared by the Quartz entry point and main(). */
	private static Spider s;

	/** Running counter of detail pages read; used only in log messages. */
	private int size = 1;
	private Site site = Site.me().setRetryTimes(1).setSleepTime(1000).addHeader("Accept-Encoding", "/");
	/** Site root; also serves as the key suffix for the crawl-log cache entry. */
	private String pic_ = "http://www.yxdown.com";

	/**
	 * Quartz entry point: builds a single-threaded spider over the news list
	 * page, attaches the persistence pipeline, and runs it asynchronously.
	 */
	@Override
	public void execute(JobExecutionContext context) throws JobExecutionException {
		s = Spider.create(new YxdownNewsProcessor()).addUrl("http://www.yxdown.com/news/").thread(1);
		s.addPipeline(new TgridsPipeLine());
		s.start();
		System.out.println("游迅网咨询-------------------------执行完毕");
	}

	/** Manual entry point for ad-hoc runs; no pipeline is attached here. */
	public static void main(String[] args) {
		s = Spider.create(new YxdownNewsProcessor()).addUrl("http://www.yxdown.com/news/").thread(1);
		s.start();
		System.out.println("游迅网咨询-------------------------执行完毕");
	}

	/**
	 * Routes each fetched page: URLs under /news/2017 are article detail
	 * pages; anything else is treated as the news list page.
	 */
	@Override
	public void process(Page page) {
		if (page.getUrl().regex("http://www.yxdown.com/news/2017").match()) {
			processDetailPage(page);
		} else {
			processListPage(page);
		}
	}

	/**
	 * Extracts source, body HTML, pagination and images from one article
	 * detail page, then updates the Tgrid cached by the list pass.
	 */
	private void processDetailPage(Page page) {
		// Tgrid was cached under this URL by the list pass.
		Tgrid tgrid = (Tgrid) WZGLUtils.getCache(CacheConsts.CACHE_MAGIC_GIRD_NAME, page.getUrl());

		// Author/source. FIX: the original assigned the fallback to `from`
		// and left ffrom empty, so the "网络" default was never used.
		String from = page.getHtml().xpath("//div[@class='title']").toString();
		String ffrom;
		if (from == null || "".equals(from)) {
			ffrom = "网络";
		} else {
			ffrom = Jsoup.parse(from).select("div").select("p").select("b").text();
		}
		tgrid.setFfrom(ffrom);
		System.out.println("来源:  "+ffrom+"\n");

		// Article body. FIX: guard both markers — the original substring
		// threw StringIndexOutOfBoundsException when either was absent.
		String pic_text = page.getHtml().xpath("//div[@class='detailTxt']").toString();
		String cont = "";
		int start = pic_text.indexOf("<p");
		int end = pic_text.lastIndexOf("/>");
		if (start >= 0 && end > start) {
			cont = pic_text.substring(start, end);
		}

		// Follow article pagination, if any, and merge the extra pages.
		List<String> hrefs = page.getHtml().xpath("//div[@id='pages']/a").all();
		if (hrefs.size() > 0) {
			String repex = "//div[@class='detailTxt']";
			cont = HrefUtils.getOtherHrefsFromTwo(hrefs, cont, repex);
		}

		// Download images and rewrite their links inside the body.
		List<String> pic_urls = page.getHtml().xpath("//div[@class='detailTxt']/p/a/img").all();
		List<String> pic_hrefs = page.getHtml().xpath("//div[@class='detailTxt']/p/a").all();
		if (pic_urls.size() <= 0) {
			tgrid.setFcoverImg(null);
			tgrid.setFcoverImg2(null);
		} else {
			tgrid = PicUtiles.downPics(pic_urls, tgrid);
			cont = PicUtiles.replaceAllHrefAndImg(pic_hrefs, pic_urls, cont);
		}

		System.out.println("cont:  "+cont+"\n");
		tgrid.setFhtml(cont);

		// Re-cache the enriched Tgrid under its URL.
		String cachename = CacheConsts.CACHE_MAGIC_GIRD_NAME;
		String name = cachename+":"+page.getUrl();
		WZGLUtils.setCache(cachename, name, tgrid);

		// FIX: append only this entry (the old StringBuffer field re-appended
		// the whole accumulated buffer on every page, duplicating entries).
		appendLog("读取第"+size+"详情................."+"\n\r");
		size++;
	}

	/**
	 * Walks the news list page, caches a Tgrid for each article not yet
	 * seen, records the finished URL, and queues the detail page.
	 */
	private void processListPage(Page page) {
		List<String> page_1 = page.getHtml().xpath("//div[@class='new_zixun']/div").all();
		int num1 = 1;
		for (String li : page_1) {
			Document docList = Jsoup.parse(li);
			// Detail-page link.
			String href = docList.select("h2").select("a").attr("href");
			// Cover image shown in the list.
			String img = docList.getElementsByClass("pic_img").select("img").attr("src");
			// List title.
			String title = docList.select("h2").select("a").text();
			// List abstract.
			String desc = docList.getElementsByClass("txt_zixun").text();
			// Publish time shown in the list.
			String time = docList.getElementsByClass("bpleft").select("span").get(1).text();
			System.out.println("地址："+href);
			System.out.println("标题："+title+"");
			System.out.println("封面："+img+"");
			System.out.println("时间："+time+"");
			System.out.println("摘要："+desc+"\n");

			// Skip articles that were crawled before.
			String cn = CacheConsts.CACHE_MAGIC_GIRD_NAME;
			String key = CacheConsts.CACHE_MAGIC_GIRD_NAME+":"+href;
			if (!WZGLUtils.exists(cn, key)) {
				// Fixed per-source metadata. FIX: was a mutable field that a
				// later local `TfinishUrl tf` shadowed; both are locals now.
				GameSkyVo meta = new GameSkyVo();
				meta.setFdescribe("巡游游戏-资讯");
				meta.setFmzgicId("afadfafaf1123113");
				meta.setFtype("100008");

				// Build the grid record. FIX: setFtitle was called twice; the
				// shared static Tgrid field is now a local.
				Tgrid grid = new Tgrid();
				grid.setFtitle(title);
				grid.setFdate(new Date());
				grid.setFaddress(href);
				grid.setFgetDate(time);
				grid.setFdescribe(meta.getFdescribe());
				grid.setFtype(meta.getFtype());
				grid.setFmzgicId(meta.getFmzgicId());
				grid.setFstatus(0);

				// Cache the crawled URL's record.
				WZGLUtils.setCache(cn, key, grid);

				// Crawl log for this article only (no cumulative re-append).
				StringBuilder entry = new StringBuilder();
				entry.append("第"+num1+"文章开始读取----------------->\r");
				entry.append("第"+num1+"标题:"+title).append("\r");
				entry.append("第"+num1+"文章详情URL:"+href).append("\r");
				entry.append("第"+num1+"文章时间:"+time).append("\r");
				entry.append("第"+num1+"文章开始结束----------------->").append("\r");
				appendLog(entry.toString());

				// Persist the finished URL.
				TfinishUrl finished = new TfinishUrl();
				finished.setFurl(href);
				finished.setFdate(new Date());
				me.save(finished);

				page.setSkip(true);
				// Queue the detail page for the detail branch.
				page.addTargetRequest(href);
			}
			// FIX: the original never incremented num1, so every log line
			// read "第1".
			num1++;
		}
	}

	/**
	 * Appends one entry to the cached crawl log.
	 * FIX: the original read and wrote under different keys in the detail
	 * branch and concatenated onto a possibly-null log ("null..." prefix);
	 * this reads and writes the same key and treats null as empty.
	 */
	private void appendLog(String entry) {
		String cacheName = CacheConsts.CACHE_LOG_MAGIC_NAME;
		String key = cacheName+":"+pic_;
		String log = (String) WZGLUtils.getCache(cacheName, key);
		if (log == null) {
			log = "";
		}
		WZGLUtils.setCache(cacheName, key, log + entry);
	}

	@Override
	public Site getSite() {
		return site;
	}
}
