package com.spider.ask120.webmagic.task;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;

import org.apache.log4j.Logger;
import org.jdiy.core.Rs;

import com.alibaba.fastjson.JSONObject;
import com.spider.ask120.service.ArticleService;
import com.spider.ask120.service.ChannelArticleInfoService;
import com.spider.ask120.service.ChannelService;
import com.spider.ask120.webmagic.Base;
import com.spider.utils.HttpClientUtil;

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;

/**
 * 健康百科频道
 * @author Shan
 *
 */
/**
 * Spider for the 120ask health-encyclopedia ("健康百科") channel.
 *
 * <p>Crawls {@code http://m.120ask.com/baike/} in three stages, dispatched
 * by URL shape in {@link #process(Page)}:
 * <ol>
 *   <li>channel index page  — queue each category list page;</li>
 *   <li>category list page  — page through the article list via POST,
 *       save new article stubs, queue the detail pages;</li>
 *   <li>article detail page — scrape contents/cover image and store them
 *       as JSON on the article row.</li>
 * </ol>
 *
 * @author Shan
 */
public class Baike extends Base implements PageProcessor {
	public static Logger logger = Logger.getLogger(Baike.class);
	/** Channel index page URL. */
	static String jkbkUrl = "http://m.120ask.com/baike/";
	/** Matches an article detail URL (with our local article id appended as a_id). */
	static String viewRegex = "http://m.120ask.com/baike/art/[0-9]+\\?a_id=[0-9]+";
	/** Base URL for building article detail requests. */
	private String arti = "http://m.120ask.com/baike/art/";
	/** Matches a category list page path, e.g. /baike/nvxing. */
	static String listUrl = "/baike/\\w+";

	// Crawl configuration: 3 retries, 1 ms delay between requests.
	private Site site = Site.me().setRetryTimes(3).setSleepTime(1);

	public Site getSite() {
		return site;
	}

	/**
	 * Core crawl callback: dispatches on the URL shape — channel index,
	 * category list, or article detail page.
	 *
	 * @see us.codecraft.webmagic.processor.PageProcessor#process(us.codecraft.webmagic.Page)
	 */
	public void process(Page page) {
		String pic_dir = fun.getProValue(BAIKE_PIC_FILE_PATH);
		if (page.getUrl().regex(jkbkUrl + "$").match()) {
			// Index page: collect category links, drop the two non-category
			// entries, de-duplicate, and queue each category list page.
			List<String> baike = page.getHtml().links().regex(listUrl).all();
			HashSet<String> set = new HashSet<String>(baike);
			set.remove("/baike/art");
			set.remove("/baike/yiyuan");
			baike = new ArrayList<String>(set);
			page.addTargetRequests(baike);

		} else if (page.getUrl().regex(listUrl + "$").match()) {
			// Category list page: resolve the channel by its string id, then
			// page through the article list (POST p=<pageIndex>) until we hit
			// an article no newer than the last stored str_id, or run dry.
			String url = page.getUrl().toString();
			String catalog = url.substring(url.indexOf("baike/") + 6);
			Rs ch = ChannelService.getChannelByStrId(catalog);
			Integer ch_id = ch.getInt("id");
			Rs ca = ChannelArticleInfoService.getInfoByChId(ch_id);
			Integer str_id = ca.getInt(ARTICLE_STR_ID);
			Integer bigger_str_id = str_id;
			boolean first = true; // first article of page 1 carries the newest str_id
			int pageIndex = 1;
			boolean flag = true;
			list:
			while (flag) {
				Map<String, String> params = new HashMap<String, String>();
				params.put("p", pageIndex + "");
				// POST request for one page of the article list.
				String listJson = HttpClientUtil.getInstance().sendHttpPost(page.getUrl().toString(), params);
				String[] strings = getStrings(listJson);
				if (null != strings) {
					for (String string : strings) {
						// Each fragment is a JSON object split on "},"; restore the
						// trailing brace, then pull the fields out by key position.
						String news = unicodeToUtf8(string) + "}";
						int i = news.indexOf("\"i\":\"");
						int t = news.indexOf("\",\"t");
						String id = news.substring(i + 5, t); // article str_id
						String title = news.substring(news.indexOf("t\":\"") + 4, news.indexOf("\",\"c"));
						String abst = news.substring(news.indexOf("s\":\"") + 4, news.indexOf("\"}"));
						String pic = news.substring(news.indexOf("c\":\"") + 4, news.indexOf("\",\"s"));

						// Stop as soon as we reach an article already stored:
						// ids are descending, so everything after it is older.
						Integer current_article_str_id = Integer.parseInt(id);
						if (str_id >= current_article_str_id) {
							break list;
						}

						Rs art = ArticleService.getArticleByStrId(ch_id + "", title, id); // already saved?
						String a_id = "";
						if (art.isNull()) {
							Rs article = new Rs("article");
							// Download the cover image locally and keep its path.
							String pic_path = fun.downloadPic(pic, pic_dir + catalog + "/");
							article.set("channel_id", ch_id);
							article.set("title", title);
							article.set("pic", pic_path);
							article.set("abst", abst);
							article.set("str_id", id);
							ArticleService.saveArticle(article);
							a_id = article.get("id");
						} else {
							a_id = art.get("id");
						}

						// Queue the detail page for content scraping.
						page.addTargetRequest(arti + id + "?a_id=" + a_id);

						// Remember the newest str_id seen (first item of page 1).
						if (first) {
							bigger_str_id = current_article_str_id;
							first = false;
						}
					}
				} else {
					flag = false; // empty/short response: no more pages
				}
				pageIndex++;
			}
			// Persist the newest str_id so the next run stops at it.
			ca.set(ARTICLE_STR_ID, bigger_str_id);
			ChannelArticleInfoService.save(ca);

		} else if (page.getUrl().regex(viewRegex + "$").match()) {
			// Article detail page: scrape table of contents, cover image and
			// the numbered sections, then store them as JSON on the article.
			List<String> list = page.getHtml().xpath("//div[@class='questionList clears']/ul/li/a/text()").all();
			String pic = page.getHtml().xpath("//center/img/@src").get();
			int i = 3; // sections start at the 3rd div.answer on the page
			List<Map<String, Object>> cont = new ArrayList<Map<String, Object>>();
			while (true) {
				String subtitle = page.getHtml().xpath("//div[@class='answer'][" + i + "]/h4/text()").get();
				if (null == subtitle) {
					break; // no more sections
				}
				String content = page.getHtml().xpath("//div[@class='answer'][" + i + "]/div/html()").get();
				// A heading may have no body; guard against null before replace
				// (the original code threw an NPE here).
				String con = content == null ? "" : content.replace("<p class=\"ask_p\"></p>", "");
				Map<String, Object> map = new HashMap<String, Object>();
				map.put((i - 2) + "." + subtitle, con);
				cont.add(map);
				i++;
			}
			String url = page.getUrl().get();
			// Local article id comes from the ?a_id= query parameter we appended.
			String id = url.substring(url.indexOf("_id=") + 4);
			Rs art = ArticleService.getArticleById(id);
			if (art.isNull()) {
				logger.warn("article is not exist, id=" + id);
			} else {
				Rs ch = ChannelService.getChannelById(art.getString("channel_id"));
				Map<String, Object> map = new HashMap<String, Object>();
				if (!ch.isNull()) {
					// Download the cover image locally and keep its path.
					String pic_path = fun.downloadPic(pic, pic_dir + ch.getString("str_id") + "/");
					map.put("pic", pic_path);
				} else {
					map.put("pic", "");
				}
				map.put("list", list);
				map.put("cont", cont);
				art.set("content", JSONObject.toJSONString(map));
				ArticleService.updateArticle(art);
			}
		}
	}

	/**
	 * Splits the raw JSON-array response of the list endpoint into one
	 * fragment per article: strips the outer wrapper and splits on "},".
	 *
	 * @param str raw response body (a JSON array of objects)
	 * @return the fragments, or {@code null} when the response is empty or
	 *         too short to contain an article — callers treat {@code null}
	 *         as "no more pages"
	 */
	public String[] getStrings(String str) {
		String[] strings = null;
		// Need more than 3 chars before substring(2, len-1) is valid; the old
		// check (!isEmpty) threw StringIndexOutOfBoundsException on bodies
		// such as "[]".
		if (null != str && str.length() > 3) {
			String string = str.substring(2, str.length() - 1);
			strings = string.split("},");
		}
		return strings;
	}

	/** Starts a single-threaded crawl from the channel index page. */
	public static void runBaike() {
		String url = "http://m.120ask.com/baike/";
		Spider s = Spider.create(new Baike()).addUrl(url);
		s.thread(1).start();
	}

	/** Manual entry point: crawls a single category page for testing. */
	public static void main(String[] args) throws Exception {
		String url = "http://m.120ask.com/baike/nvxing";
		Spider s = Spider.create(new Baike()).addUrl(url);
		s.thread(1).start();
	}
}
