package cn.jc.spider.neww.main;

import java.time.LocalTime;
import java.util.Date;

import org.apache.commons.lang.math.NumberUtils;
import org.apache.torque.linkage.SpiderJcMapInit;

import cn.jc.spider.neww.Crawler;
import cn.jc.spider.neww.URLqueue;
import cn.jc.spider.neww.storer.StorerXiaohuaImpl;

public class XiaoHua {

	/**
	 * Entry point for the kl688.com ("快乐麻花") joke-site crawler.
	 * Runs hourly (scheduled externally): crawls list pages {@code from}
	 * through {@code to}; the storer aborts the run once it has seen
	 * {@code maxrepeatnumber} duplicate items — a policy derived from the
	 * site's observed update frequency.
	 *
	 * @param args optional override, all three required together:
	 *             [0]=first page, [1]=last page, [2]=max duplicate count
	 * @author JiangChi
	 * @throws Exception propagated from the crawl pipeline
	 */
	public static void main(String[] args) throws Exception {
		SpiderJcMapInit.init_my(); // initialize Torque DB mapping before any storage
		int from = 1;
		int to = 10;
		int maxrepeatnumber = 30;
		// Defaults are overridden only when all three arguments are present.
		if (args.length > 2) {
			from = NumberUtils.toInt(args[0]);
			to = NumberUtils.toInt(args[1]);
			maxrepeatnumber = NumberUtils.toInt(args[2]);
		}
		StorerXiaohuaImpl s = new StorerXiaohuaImpl(maxrepeatnumber);
		getdatafromxiaohua(from, to, s);
	}

	// Per-run request marker handed to Crawler.runTask; the current second (0-59)
	// serves as a cheap pseudo-unique id. Uses java.time instead of the
	// deprecated java.util.Date#getSeconds().
	private static int timetime = LocalTime.now().getSecond();

	/**
	 * Requests list pages {@code from}..{@code to} of www.kl688.com, stopping
	 * early once the storer reports more duplicates than its threshold.
	 *
	 * @param from first page number (page 1 maps to the site root URL)
	 * @param to   last page number, inclusive
	 * @param s    storer tracking duplicate counts across pages
	 * @throws Exception propagated from URL queueing / crawling
	 */
	private static void getdatafromxiaohua(int from, int to, StorerXiaohuaImpl s) throws Exception {
		for (int page = from; page <= to; page++) {
			System.out.println("正在请求第"+page+"页数据");
			String url;
			// Only pages >= 2 use the index_N.htm naming scheme.
			if (page > 1) {
				url = "http://www.kl688.com/newjokes/index_" + page + ".htm";
			} else {
				url = "http://www.kl688.com/index.htm";
			}
			// NOTE(review): getTimetime() appears to be the duplicate counter and
			// getTime() the configured threshold — confirm against StorerXiaohuaImpl.
			if (s.getTimetime() > s.getTime()) {
				System.out.println("发现了"+s.getTimetime()+"条重复数据，请求终止");
				break;
			}
			URLqueue.inserturl(url, 8); // 8 = task/site id for this spider — TODO confirm
			Crawler.runTask(8, s, timetime, "GBK"); // target pages are GBK-encoded
			try {
				Thread.sleep(5 * 1000); // be polite: 5 s pause between page requests
			} catch (InterruptedException e) {
				// Restore the interrupt flag instead of swallowing it, and stop crawling
				// so the thread can actually respond to cancellation.
				Thread.currentThread().interrupt();
				break;
			}
		}
	}
}
