package com.leaderment.timatt.webmagic.pageprocessor;

import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

import org.apache.log4j.Logger;
import org.springframework.web.client.RestTemplate;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.leaderment.timatt.ApplicationInitializer;
import com.leaderment.timatt.controller.InitUpsData;
import com.leaderment.timatt.service.impl.CrawlerServiceImpl;
import com.leaderment.timatt.webmagic.util.DBUtil;
import com.leaderment.timatt.webmagic.util.DataStorageToLocal;
import com.leaderment.timatt.webmagic.util.IDataStorage;

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;

/* Page processor for crawling Amazon search-keyword / ranking pages. */
public class MyPageProcessor implements PageProcessor {

	// Timestamp pattern used in saved-page file names.
	// NOTE(review): SimpleDateFormat is not thread-safe. This is only safe if the
	// Spider drives process() from a single thread — confirm, or create a local
	// formatter per call / switch to java.time.DateTimeFormatter.
	SimpleDateFormat sdf = new SimpleDateFormat("yyyy_MM_dd_HH");
	// Timestamp of this crawl run (used in file names and DB records)
	Date crawlDate;
	// JSON describing the crawl task: urlId, crawlerDataType, crawlerDeviceType,
	// url, pageLimit, parameter
	String urlJsonString;

	RestTemplate restTemplate;

	Logger logger = Logger.getLogger(MyPageProcessor.class);

	Integer urlId;

	/**
	 * Minimal constructor (no task JSON; headers/user-agent are not configured).
	 *
	 * @param restTemplate REST client used to submit pages to the parsing service
	 * @param crawlDate    timestamp of this crawl run
	 * @param url          start url of the crawl
	 */
	public MyPageProcessor(RestTemplate restTemplate, Date crawlDate, String url) {
		this.crawlDate = crawlDate;
		this.restTemplate = restTemplate;
		// FIX: the url parameter was previously accepted but silently dropped
		this.url = url;
	}

	/**
	 * Full constructor: parses the task JSON and configures request headers.
	 *
	 * @param urlId         database id of the crawl task
	 * @param restTemplate  REST client used to submit pages to the parsing service
	 * @param crawlDate     timestamp of this crawl run
	 * @param urlJsonString task description JSON (see field doc above)
	 */
	public MyPageProcessor(Integer urlId, RestTemplate restTemplate, Date crawlDate, String urlJsonString) {
		this.urlId = urlId;
		this.crawlDate = crawlDate;
		this.urlJsonString = urlJsonString;
		this.restTemplate = restTemplate;
		initPageProcessor();
	}

	// Type of data this crawler collects
	String crawlerDataType;
	// Device type the crawler emulates ("PC" or phone)
	String crawlerDeviceType;
	// Start url of the crawl
	String url;
	// Maximum number of pages to crawl
	Integer pageLimit;
	// Extra parameters (JSON; may contain a "headers" array)
	String parameter;

	// Index of the randomly chosen user agent
	int randomIndex;

	// Status codes the downloader accepts, so these responses reach process()
	// for explicit retry handling instead of being discarded.
	// (Plain HashSet instead of the previous double-brace anonymous subclass,
	// which created a needless inner class holding a reference to 'this'.)
	Set<Integer> acceptStatCode = new HashSet<Integer>(Arrays.asList(200, 407, 403, 500, 502, 503));

	// Request/site configuration shared by all requests of this processor
	private Site site = Site.me()
			.setDisableCookieManagement(false)		// keep cookie management enabled
			.setTimeOut(60000)						// request timeout (ms)
			.setUseGzip(true)						// accept gzip-compressed responses
			.setSleepTime(1000)
			.setAcceptStatCode(acceptStatCode)
			.addHeader("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8")
			.addHeader("Accept-Encoding", "gzip, deflate")
			.addHeader("Accept-Language", "zh-CN,zh;q=0.8")
			.addHeader("Upgrade-Insecure-Requests", "1");

	// Pool of desktop user agents; one is picked at random per processor
	String[] userAgentForPC = {
		 "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
		 "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
		 "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E",
		 "Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko",
		 "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36",
		 "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko)",
		 "Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1",
		 "Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3",
		 "Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12",
		 "Opera/9.27 (Windows NT 5.2; U; zh-cn)",
		 "Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0",
		 "Opera/8.0 (Macintosh; PPC Mac OS X; U; en)",
		 "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6",
		 "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)",
		 "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)",
		 "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)",
		 "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000 Chrome/26.0.1410.43 Safari/537.1 ",
		 "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; QQBrowser/7.3.9825.400)",
		 "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0 ",
		 "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.92 Safari/537.1 LBBROWSER",
		 "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)",
		 "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/3.0 Safari/536.11"
	};
	// Pool of mobile user agents
	String[] userAgentForPhone = {
		 "Mozilla/5.0 (Linux; U; Android 4.4.2; zh-cn; PE-TL20 Build/HuaweiPE-TL20) AppleWebKit/537.36 (KHTML, like Gecko)Version/4.0 MQQBrowser/5.3 Mobile Safari/537.36",
		 "Mozilla/5.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/600.1.3 (KHTML, like Gecko) Version/8.0 Mobile/12A4345d Safari/600.1.4",
		 "Mozilla/5.0 (iPhone; CPU iPhone OS 7_0 like Mac OS X; en-us) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11A465 Safari/9537.53",
		 "Mozilla/5.0 (iPad; CPU OS 7_0 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11A465 Safari/9537.53",
		 "Mozilla/5.0 (Linux; Android 4.3; Nexus 7 Build/JSS15Q) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.72 Safari/537.36"
	};

	/**
	 * Parses {@link #urlJsonString}, picks a random user agent for the device
	 * type, and applies any extra headers from the task's "headers" array.
	 */
	public void initPageProcessor() {
		JSONObject urlObject = JSONObject.parseObject(urlJsonString);
		urlId = urlObject.getInteger("urlId");
		crawlerDataType = urlObject.getString("crawlerDataType");
		crawlerDeviceType = urlObject.getString("crawlerDeviceType");
		url = urlObject.getString("url");
		pageLimit = urlObject.getInteger("pageLimit");
		parameter = urlObject.getString("parameter");

		// Choose a desktop or mobile user agent pool depending on the device type
		// ("PC".equals(...) is null-safe, unlike the previous ordering)
		if ("PC".equals(crawlerDeviceType)) {
			randomIndex = (int) (Math.random() * userAgentForPC.length);
			site.addHeader("User-Agent", userAgentForPC[randomIndex]);
		} else {
			randomIndex = (int) (Math.random() * userAgentForPhone.length);
			site.addHeader("User-Agent", userAgentForPhone[randomIndex]);
		}

		// Apply extra headers, if the task declares any. The previous code would
		// NPE when "parameter" was missing a "headers" array.
		JSONObject parameterObj = JSONObject.parseObject(parameter);
		JSONArray headersArray = (parameterObj == null) ? null : parameterObj.getJSONArray("headers");
		if (headersArray == null) {
			return;
		}
		for (int i = 0; i < headersArray.size(); i++) {
			JSONObject headerObj = headersArray.getJSONObject(i);
			// fastjson's JSONObject implements Map<String, Object>; iterate typed
			// entries instead of the previous raw Map/Iterator with unchecked casts
			for (Map.Entry<String, Object> entry : headerObj.entrySet()) {
				String key = entry.getKey();
				String value = String.valueOf(entry.getValue());
				logger.info("header Key = " + key + ", Value = " + value);
				site.addHeader(key, value);
			}
		}
	}

	// Page currently being crawled; starts at page 1
	Integer currentPage = 1;

	/**
	 * Handles one fetched page: persists the raw HTML locally, records the file
	 * in the DB, forwards the HTML to the parsing service, and schedules the
	 * next request (retry or next page) based on the parser's response code:
	 * 1 = ok/next page, 2 = retry current page, 3 = crawl finished, 4 = bad page.
	 */
	@Override
	public void process(Page page) {
		// Throttle between requests to reduce the chance of being blocked
		try {
			Thread.sleep(2000);
		} catch (InterruptedException e) {
			// Restore the interrupt flag instead of swallowing the interruption
			Thread.currentThread().interrupt();
			logger.warn("crawl sleep interrupted", e);
		}
		String requestURL = page.getRequest().getUrl();
		logger.info("requestURL:" + requestURL);

		// Download failed (no body or non-200 status): re-queue the same url
		if (page.getBytes() == null || page.getStatusCode() != 200) {
			addURL(page, requestURL);
			return;
		}

		// Persist the raw page to local storage and record it in the DB
		String responseStr = page.getRawText();
		logger.info("页面输出：" + requestURL);

		String fileName = crawlerDataType + "_" + crawlerDeviceType + "_" + currentPage + "_" + sdf.format(crawlDate) + ".html";
		IDataStorage dataStorage = new DataStorageToLocal();
		String filePath = dataStorage.save(crawlDate, fileName, requestURL, currentPage, responseStr);
		DBUtil.saveFile(urlId, filePath, crawlDate, currentPage);

		// Submit the page to the parsing module; it answers with
		// {"code":x, "nextPage":n, "nextPageUrl":"..."}
		String result = requestForParsing(responseStr, requestURL, crawlerDataType, parameter, crawlDate);
		if (result == null || result.isEmpty()) {
			return;
		}
		logger.info("result:" + result);

		JSONObject resultObj = JSONObject.parseObject(result);
		Integer code = resultObj.getInteger("code");
		Integer nextPage = resultObj.getInteger("nextPage");
		String nextPageUrl = resultObj.getString("nextPageUrl");
		if (code == null) {
			// Previously unboxing a missing code threw an NPE here
			logger.error("parse result has no code, url:" + requestURL + ",urlId:" + urlId);
			return;
		}
		switch (code) {
			case 1:
				// FIX: compare boxed Integers with equals(); '==' compares object
				// identity and silently fails for page numbers outside the
				// Integer cache range [-128,127], so long crawls never stopped.
				if (currentPage.equals(nextPage)) {
					// Next page equals the current page: crawl is complete
					stopCrawler(urlId);
					return;
				}
				// Success: enqueue the next page and advance the page counter
				addURL(page, nextPageUrl);
				currentPage = nextPage;
				break;
			case 2:
				// Parse failure: retry the current page
				addURL(page, requestURL);
				break;
			case 3:
				// Parser reports the crawl is finished
				stopCrawler(urlId);
				break;
			case 4:
				// Broken page: record the url and drop it
				logger.error("error url:" + requestURL + ",urlId:" + urlId);
				break;
			default:
				logger.warn("unknown parse result code " + code + " for url:" + requestURL);
		}
	}

	/**
	 * Posts the raw page to the remote parsing service.
	 *
	 * @param html            raw page HTML
	 * @param crawlerURL      url the page was fetched from
	 * @param crawlerDataType service id; the parser is reached at
	 *                        http://{crawlerDataType}/parse/html (resolved by the
	 *                        injected RestTemplate)
	 * @param jsonString      extra parameters forwarded verbatim to the parser
	 * @param crawlDate       timestamp of this crawl run
	 * @return the parser's JSON reply, e.g.
	 *         {"code":1,"nextPageUrl":"...","nextPage":2,"totalPage":5}
	 */
	public String requestForParsing(String html, String crawlerURL, String crawlerDataType, String jsonString, Date crawlDate) {
		String parseUrl = "http://" + crawlerDataType + "/parse/html";
		Map<String, Object> body = new HashMap<>();
		body.put("response", html);
		body.put("currentPage", currentPage);
		body.put("requestUrl", crawlerURL);
		body.put("parameters", jsonString);
		body.put("crawlDate", crawlDate);
		return restTemplate.postForObject(parseUrl, body, String.class);
	}

	/**
	 * Adds a url to the crawl queue (used for both retries and next pages).
	 *
	 * @param page page entity whose target-request queue receives the url
	 * @param url  url to enqueue
	 */
	public void addURL(Page page, String url) {
		page.addTargetRequest(url);
	}

	/**
	 * Stops the spider registered for the given urlId and releases one slot in
	 * the global crawl-throttling queue.
	 *
	 * @param urlId id of the finished crawl task
	 */
	public void stopCrawler(Integer urlId) {
		try {
			// Release one slot from the blocking queue that limits concurrent crawls
			CrawlerServiceImpl.threadBlockQueue.take();
			Spider spider = InitUpsData.spiderMap.get(urlId);
			if (spider == null) {
				// Previously this dereferenced a missing spider and threw an NPE
				logger.warn("no spider registered for urlId:" + urlId);
				return;
			}
			spider.close();
			// Give in-flight requests a moment to finish before forcing a stop
			Thread.sleep(5000);
			spider.stop();
			logger.info("爬取完成");
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt();
			logger.warn("stopCrawler interrupted, urlId:" + urlId, e);
		}
	}

	@Override
	public Site getSite() {
		return site;
	}
}
