/**
 * 文件名:WorksCrawler.java
 * 包名:com.wx.demo.wcd.crawler
 * 创建时间:2017年2月22日下午4:09:21
 * <p>Copyright ®医界互联 TUS 平台版权所有。</p>
 *
 */

package com.wx.demo.wcd.crawler;

import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.log4j.Logger;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.context.support.AbstractApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;

import cn.edu.hfut.dmic.webcollector.model.CrawlDatum;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler;

import com.wx.demo.wcd.domain.po.AuthorPO;
import com.wx.demo.wcd.domain.po.WorksPO;
import com.wx.demo.wcd.domain.qo.AuthorQO;
import com.wx.demo.wcd.pub.UUIDGenerator;
import com.wx.demo.wcd.service.AuthorService;
import com.wx.demo.wcd.service.WorksService;
import com.wx.demo.wcd.service.impl.AuthorServiceImpl;
import com.wx.demo.wcd.service.impl.WorksServiceImpl;

/**
 * WorksCrawler <br/>
 * Created: 2017-02-22 16:09:21 <br/>
 * Breadth-first crawler that scrapes classical-poetry works from the
 * gushiwen.org listing pages and persists each parsed work through
 * {@code WorksService}. Seed URLs are generated in {@link #main(String[])}
 * from a dynasty/category → item-count map. <br/>
 *
 * @author 王笑 Email:wangxiao@yijiehulian.com.
 * @version
 * @since JDK 1.8
 * @see
 */
public class WorksCrawler extends BreadthCrawler {
	// One logger per class, not per instance.
	private static final Logger logger = Logger.getLogger(WorksCrawler.class);

	/**
	 * Spring context used to look up the service beans. Beans are cast to
	 * their service INTERFACES (not the Impl classes): if Spring hands back a
	 * JDK dynamic proxy (AOP/transactions), a cast to the concrete Impl class
	 * would throw ClassCastException.
	 */
	private final AbstractApplicationContext ctx = new ClassPathXmlApplicationContext(
			new String[] { "classpath*:/spring/spring-dao.xml",
					"classpath*:/spring/spring-mvc.xml",
					"classpath*:/spring/spring-service.xml" });

	private final AuthorService authorService = (AuthorService) ctx
			.getBean("authorService");

	private final WorksService worksService = (WorksService) ctx
			.getBean("worksService");

	/**
	 * Entry point: builds the seed-URL set from the (currently commented-out)
	 * dynasty map, configures the crawler, and starts a depth-3 crawl.
	 */
	public static void main(String[] args) throws Exception {
		WorksCrawler crawler = new WorksCrawler("depth_crawler", true);
		// Dynasty/category name -> total item count on the site.
		// Un-comment the entries that should be crawled.
		Map<String, Integer> map = new HashMap<String, Integer>();
		// map.put("诗", 49148);
		// map.put("词", 21342);

		// map.put("先秦", 516);
		// map.put("两汉", 116);
		// map.put("魏晋", 99);
		// map.put("南北朝", 191);
		// map.put("隋代", 17);

		// map.put("唐代", 92);
		// map.put("五代", 138);
		// map.put("宋代", 2000);
		// map.put("金朝", 16);
		// map.put("元代", 28);
		// map.put("明代", 43);
		// map.put("清代", 292);

		int t = 0;
		for (Map.Entry<String, Integer> entry : map.entrySet()) {
			// Total item count for this category.
			int ps = entry.getValue().intValue();
			// Ten works per listing page -> ceiling division for the page count.
			t = (ps + 9) / 10;
			// Generate one seed URL per listing page.
			for (int i = 1; i <= t; i++) {
				crawler.addSeed(new CrawlDatum(
						"http://so.gushiwen.org/type.aspx?p=" + i + "&c="
								+ entry.getKey() + "&x=词").meta("depth", "1"));
			}
		}
		/* worker threads */
		crawler.setThreads(100);
		/* maximum number of execution attempts per crawl task */
		crawler.setMaxExecuteCount(10);
		/* interval between executions (ms) */
		crawler.setExecuteInterval(100);
		/* upper bound of pages fetched per iteration.
		 * NOTE(review): this uses the t of the LAST map entry iterated, not
		 * the maximum across entries — confirm that is intended. */
		crawler.setTopN(t + 1);
		/* resumable (checkpointed) crawling */
		// crawler.setResumable(true);
		// crawl depth
		crawler.start(3);
	}

	/**
	 * @param crawlPath directory for the crawler's Berkeley DB state
	 * @param autoParse whether WebCollector auto-parses links from pages
	 */
	public WorksCrawler(String crawlPath, boolean autoParse) {
		super(crawlPath, autoParse);
	}

	/**
	 * Parses one listing page: each {@code div.main3 div.sons} node holds one
	 * work as three {@code <p>} elements (title, author line, content). The
	 * parsed works are batch-inserted via {@code worksService}.
	 */
	@Override
	public void visit(Page page, CrawlDatums next) {
		try {
			// URL of the page being visited.
			String url = page.getUrl();
			logger.info("URL:\n" + url);

			// Only the div.main3 > div.sons nodes carry work data.
			Elements dom = page.select("div[class=main3]");
			Elements ets = dom.select("div[class=sons]");

			// Works parsed from this page.
			List<WorksPO> lw = new ArrayList<WorksPO>();

			for (Element et : ets) {
				WorksPO works = new WorksPO();
				works.setWorksId(UUIDGenerator.generate());// works ID
				works.setCreateTime(new Date());// creation time
				// Theme comes from the "x=" query parameter of the listing URL.
				String theme = url.substring(url.indexOf("x=") + 2);
				if ("诗".equals(theme)) {
					works.setTheme("02");
				} else if ("词".equals(theme)) {
					works.setTheme("03");
				} else {
					works.setTheme("01");
				}

				// The three <p> elements are, in order: title, author, content.
				Elements es = et.select("p");
				int j = 0;
				for (Element e : es) {
					j++;
					if (j == 1) {
						String worksName = e.text();
						if (worksName != null && !"".equals(worksName)) {
							works.setWorksName(worksName);// works title
						}
					}
					if (j == 2) {
						// Author line is expected to look like "作者：NAME 朝代".
						// Guard both delimiters: a malformed line would
						// otherwise throw StringIndexOutOfBoundsException and
						// abort the whole page.
						String text = e.text();
						int colon = text.indexOf("：");
						int space = text.indexOf(" ");
						if (colon >= 0 && space > colon) {
							String authorName = text.substring(colon + 1, space);
							if (!"".equals(authorName)) {
								AuthorQO aq = new AuthorQO();
								aq.setAuthorName(authorName);// author name
								List<AuthorPO> las = authorService
										.selectCondition1(aq);
								if (las.size() > 0) {
									// Known author: link by id.
									works.setAuthorId(las.get(0).getAuthorId());
								} else {
									// Unknown author: keep the raw name.
									works.setOtherAuthor(authorName);
								}
							}
						}
					}
					if (j == 3) {
						String content = e.text();
						if (content != null && !"".equals(content)) {
							works.setContent(content);// works body text
						}
					}
				}
				if (works.inputDescription() != null
						&& !"".equals(works.inputDescription())) {
					// Only keep works that produced a description.
					lw.add(works);
				}
			}

			// Skip the batch insert when the page yielded nothing: batch
			// INSERT statements typically fail on an empty value list.
			if (lw.isEmpty()) {
				logger.info("数据抓取并存储失败！");
				return;
			}
			int rows = worksService.insertBatch(lw);
			if (rows > 0) {
				logger.info("数据抓取并存储成功！");
			} else {
				logger.info("数据抓取并存储失败！");
			}
		} catch (Exception e) {
			// Log the full stack trace instead of swallowing it: the original
			// logged only e.getMessage() at debug/info level.
			logger.error("visit failed for " + page.getUrl(), e);
		}
	}
}
