package com.szj360.spider;


import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.log4j.Logger;

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Request;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.selector.Html;

import com.google.common.collect.ImmutableMap;
import com.szj360.utils.HtmlUtils;

/**
 * WebMagic {@link PageProcessor} that crawls cnbeta.com: list pages yield
 * detail-page links, detail pages yield title/content/date plus CDN image
 * requests, and image responses are captured as raw bytes.
 */
public class CommonPageProcessor implements PageProcessor {

    /** Shared logger; avoids calling Logger.getLogger(...) on every page. */
    private static final Logger LOG = Logger.getLogger(CommonPageProcessor.class);

    /**
     * Matches {@code <img ... src="http://static.cnbetacdn.com/...">} in article
     * markup; compiled once instead of on every process() call.
     */
    private static final Pattern IMG_PATTERN =
            Pattern.compile("<img[\\S\\s]*?src=\"(http://static.cnbetacdn.com/[\\s\\S]*?)\"");

    // Part 1: site-level crawl configuration (retry count, politeness delay).
    private Site site = Site.me().setRetryTimes(3).setSleepTime(3000);

    /** Regex selecting which discovered links count as detail pages; set via setter. */
    private String detailPageRegex;

    @Override
    // process(...) is the core extraction callback, invoked once per fetched page.
    public void process(Page page) {
        // Part 2: dispatch on the URL shape to the matching extraction routine.
        String url = page.getUrl().toString();
        if (url.matches("http://www.cnbeta.com/articles/[\\s\\S]+")) {
            processDetailPage(page);
        } else if (url.matches("http://static.cnbetacdn.com[\\s\\S]*")) {
            processImage(page, url);
        } else {
            processListPage(page);
        }
    }

    /** Extracts title, content and date from an article page and queues its images. */
    private void processDetailPage(Page page) {
        LOG.info("detailpage");
        page.putField("type", "detailpage");
        page.putField("title", HtmlUtils.replaceTag(page.getHtml().$("#news_title").get()));
        page.putField("content", HtmlUtils.replaceTag(page.getHtml().$(".article_content").get()));
        page.putField("date", HtmlUtils.replaceTag(page.getHtml().$(".date").toString()));
        page.putField("src", page.getUrl());

        // Queue every CDN-hosted image found in the extracted content; the article
        // URL travels along as "contentUrl" so the image result can be linked back.
        String content = page.getResultItems().get("content");
        if (content != null) { // guard: $(".article_content").get() may yield no content
            Matcher m = IMG_PATTERN.matcher(content);
            while (m.find()) {
                String img = m.group(1);
                page.addTargetRequest(new Request(img).putExtra("contentUrl", page.getUrl()));
                LOG.info(img);
            }
        }
    }

    /** Records a downloaded image body plus the article URL it was queued from. */
    private void processImage(Page page, String url) {
        page.putField("type", "img");
        page.putField("img", page.getRawText());
        page.putField("link", url);
        // "contentUrl" was attached by processDetailPage when this image was queued;
        // a dedicated image spider downstream handles the actual persistence.
        Object contentUrl = page.getRequest().getExtra("contentUrl");
        page.putField("contentUrl", contentUrl);
    }

    /** Follows item links on a list page and queues matching detail pages. */
    private void processListPage(Page page) {
        // Each ".item" fragment holds one article teaser; pull its title link.
        List<String> itemList = page.getHtml().$(".item").all();
        for (String item : itemList) {
            Html h = new Html(item);
            page.addTargetRequest(attr(h.$(".title a").toString(), "href"));
        }
        // Part 3: discover follow-up URLs. Guard against an unset regex, which
        // would otherwise silently build the broken pattern "(null)".
        if (this.detailPageRegex != null) {
            page.addTargetRequests(
                    page.getHtml().links().regex("(" + this.detailPageRegex + ")").all());
        }
    }

    @Override
    public Site getSite() {
        return site;
    }

    /**
     * Returns the value of attribute {@code attr} inside a serialized HTML
     * element, or {@code ""} when the element is null or the attribute is absent.
     *
     * @param htmlElement the element's outer HTML, e.g. {@code <a href="...">}
     * @param attr        the attribute name to look up, e.g. {@code href}
     * @return the attribute value, or the empty string if not found
     */
    public static String attr(String htmlElement, String attr) {
        if (htmlElement == null) {
            return "";
        }
        // Pattern.quote keeps regex metacharacters in the attribute name from being
        // interpreted; the greedy leading [\S\s]* preserves the original behavior of
        // matching the LAST occurrence of the attribute in the element string.
        String attrVal = "[\\S\\s]*" + Pattern.quote(attr) + "=\"([\\s\\S]*?)\"[\\S\\s]*";
        Matcher m = Pattern.compile(attrVal).matcher(htmlElement);
        return m.find() ? m.group(1) : "";
    }

    /** Unimplemented placeholder kept for API compatibility; always returns null. TODO: implement or remove. */
    public static Object getPageDtl(Page page) {
        return null;
    }

    public String getDetailPageRegex() {
        return this.detailPageRegex;
    }

    /** Fluent setter for the detail-page link regex used on list pages. */
    public CommonPageProcessor setDetailPageRegex(String detailPageRegex) {
        this.detailPageRegex = detailPageRegex;
        return this;
    }
}
