package com.zyx.crawlerdemo.excelprocessor.crawler;

import jxl.write.WriteException;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * @author Yaxi.Zhang
 * @since 2021/8/12 12:08
 * desc: Example of crawling web pages and writing the extracted data to Excel.
 */
public class CrawlerTest {

	public static void main(String[] args)
			throws IOException, WriteException {
		// Listing pages to crawl.
		List<String> targets = new ArrayList<>();
		targets.add("https://www.pythonforbeginners.com/?page=1");
		targets.add("https://www.pythonforbeginners.com/?page=2");
		targets.add("https://www.pythonforbeginners.com/?page=3");

		// Gather posts from every page into one list.
		List<PostModel> posts = new ArrayList<>();
		for (String target : targets) {
			posts.addAll(crawerData(target));
		}

		// Persist the collected posts twice: jxl (.xls) and POI (.xlsx).
		DataToExcelByJxl.writeInfoListToExcel(
				"data/post.xls","sheet1",posts);
		DataToExcelByPoi.writeInfoListToExcel(
				"data/post1.xlsx","sheet1",posts);
	}

	/**
	 * Downloads one listing page and extracts the title and body text of
	 * every post found on it.
	 *
	 * @param url the page URL to fetch
	 * @return one {@code PostModel} per post on the page (possibly empty)
	 * @throws IOException if the HTTP fetch or parse fails
	 */
	static List<PostModel> crawerData(String url) throws IOException{
		// Fetch and parse the page HTML (30-second timeout).
		Document page = Jsoup.connect(url).timeout(30000).get();
		// Each post lives in an <li class="hentry"> under the nav list.
		Elements postElements = page.select("ul[class=nav nav-list]")
				.select("li[class=hentry]");
		List<PostModel> results = new ArrayList<>(1 << 4);
		for (Element post : postElements) {
			// Map the scraped title/body into the transfer object.
			PostModel model = new PostModel();
			model.setPostTitle(post.select(" h2 > a").text());
			model.setPostContent(post
					.select("div[class=post-bodycopy cf]").text());
			results.add(model);
		}
		return results;
	}

}
