package com.zyx.crawlerdemo.fileprocessor.crawler.pythonforbeginners;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.List;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;


/**
 * Demo crawler: fetches the first three listing pages of
 * pythonforbeginners.com, extracts each post's title and body text via
 * Jsoup CSS selectors, and appends them (tab-separated, CRLF-terminated)
 * to {@code crawldata/pythonforbeginners.txt} encoded as GBK.
 */
public class CrawlerTest {

	public static void main(String[] args) throws IOException {
		// URLs to crawl (first three listing pages)
		List<String> urlList = new ArrayList<String>();
		urlList.add("https://www.pythonforbeginners.com/?page=1");
		urlList.add("https://www.pythonforbeginners.com/?page=2");
		urlList.add("https://www.pythonforbeginners.com/?page=3");

		// Output file; create the parent directory first, otherwise
		// FileOutputStream fails with FileNotFoundException on a fresh run.
		File outFile = new File("crawldata/" + "pythonforbeginners.txt");
		File parentDir = outFile.getParentFile();
		if (parentDir != null && !parentDir.exists()) {
			parentDir.mkdirs();
		}

		// try-with-resources guarantees the writer is flushed and closed
		// even if a crawl of one of the URLs throws IOException.
		// Text is written in the GBK charset, as in the original design.
		try (BufferedWriter writer = new BufferedWriter(
				new OutputStreamWriter(
						new FileOutputStream(outFile), "gbk"))) {
			for (String s : urlList) {
				List<PostModel> data = crawerData(s);
				for (PostModel model : data) {
					// One record per line: title <TAB> content CRLF
					writer.write(model.getPost_title() + "\t" + model.getPost_content() + "\r\n");
				}
			}
		}
	}

	/**
	 * Fetches one listing page and extracts every post on it.
	 * (Name kept for compatibility; "crawlData" would be the corrected spelling.)
	 *
	 * @param url listing-page URL to fetch
	 * @return one {@link PostModel} per post found on the page; empty list if
	 *         the selectors match nothing
	 * @throws IOException if the HTTP fetch fails or times out (30 s limit)
	 */
	static List<PostModel> crawerData(String url) throws IOException {
		// Accumulates the extracted posts for this page
		List<PostModel> datalist = new ArrayList<PostModel>();
		// Fetch and parse the page HTML (30-second timeout)
		Document doc = Jsoup.connect(url).timeout(30000).get();
		// Locate each post entry within the listing container
		Elements elements = doc.select("ul[class=nav nav-list]")
				.select("li[class=hentry]");
		for (Element ele : elements) {
			// Extract title (link text inside the h2) and body copy
			String post_title = ele.select(" h2 > a").text();
			String post_content = ele
					.select("div[class=post-bodycopy cf]").text();
			// Wrap the extracted fields in a model object
			PostModel model = new PostModel();
			model.setPost_title(post_title);
			model.setPost_content(post_content);
			datalist.add(model);
		}
		return datalist;
	}
}
