import entity.PoemInfo;
import filehandler.PushIntoFile;
import spider.PullFromWeb;

import java.util.List;
import java.util.Scanner;

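/**
 * Command-line entry point: reads a start page, an end page and a request delay from
 * standard input, then crawls the gushiwen.org poem list pages in that range with
 * PullFromWeb and writes the extracted poems to a file via PushIntoFile.
 */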
public class Main {
	
	public static void main(String[] args) {
		int cnt; // pages before number 10 do not exist
		int end;
		int sleepTime = 0;
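		// read the crawl range and the delay between requests from standard input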
		Scanner scanner = new Scanner(System.in);
		System.out.println("起始网址编号（含）：");
		cnt = Integer.parseInt(scanner.nextLine());
		System.out.println("终止网址编号（含）：");
		end = Integer.parseInt(scanner.nextLine());
		System.out.println("间隔时间：ms");
		String sSleepTime =scanner.nextLine();
		scanner.close();

		// fall back to a 2-second delay when no value was entered
		if (sSleepTime.isEmpty()) {
			sleepTime = 2000;
		} else {
			sleepTime = Integer.parseInt(sSleepTime);
		}

		// each list page lives at <baseUrlPrefix><pageNumber>.aspx
		String baseUrlPrefix = "https://www.gushiwen.org/shiwen/default_3A666666666666A";
		String baseUrlSuffix = ".aspx";
		do {
			System.out.println("Crawling page " + cnt);
			List<PoemInfo> poemInfoList = PullFromWeb.pull(baseUrlPrefix + cnt + baseUrlSuffix);
			PushIntoFile.push(poemInfoList);
			cnt++;
			try {
				Thread.sleep(sleepTime);
			} catch (InterruptedException e) {
				// restore the interrupt flag so the interruption is not silently swallowed
				Thread.currentThread().interrupt();
				e.printStackTrace();
			}
		} while (cnt <= end); // keep crawling until the end page number (inclusive) is reached
	}
}