package com.zouxian.processor;

/*
 * Crawler for huanqiu.com (环球网 / Global Times news site).
 */
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;

import com.zouxian.pipeline.MyFilePipeline;
import com.zouxian.pojo.News;
import com.zouxian.util.ListToString;

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.downloader.selenium.SeleniumDownloader;
import us.codecraft.webmagic.pipeline.ConsolePipeline;
import us.codecraft.webmagic.pipeline.FilePipeline;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.selector.Selectable;

public class MyProcessor5 implements PageProcessor {
	// 配置
	private Site site = Site.me().setRetryTimes(3).setSleepTime(100);
	private static int count = 0;

	public Site getSite() {
		// TODO Auto-generated method stub
		return site;
	}

	public void process(Page page) {

		// http://china.huanqiu.com/article/2018-09/13066128.html
		// http://world.huanqiu.com/exclusive/2018-09/13065432.html
		Boolean flag = page.getUrl().regex("http://[a-z]+.huanqiu.com/[0-9 a-z]+/[0-9 -]+/[0-9]+.html").match();
		if (!flag) {
			page.addTargetRequests(
					page.getHtml().links().regex("http://[a-z]+.huanqiu.com/[0-9 a-z]+/[0-9 -]+/[0-9]+.html").all());
		}
		// 获取所有满足条件的url

		// System.err.println(page.getUrl());
		count++;
		try {
			Thread.sleep(5000);
		} catch (InterruptedException e) {
			e.printStackTrace();
		}
		// 获取url
		String url = page.getUrl().get();
		page.putField("url", url);
		// 获取标题

		String title = page.getHtml().$(".tle", "text").toString();
		if (title == null) {
			page.setSkip(true);

		}
		page.putField("title", title);
		// 获取关键字
		/*
		 * String
		 * keyword=page.getHtml().$(".label__link-wrapper","text").toString();
		 * page.putField("keyword", keyword);
		 */
		// 获取页面编码
		String charset = page.getCharset();
		page.putField("charset", charset);
		// 获取日期
		String putTime = page.getHtml().$(".la_t_a", "text").toString();
		page.putField("data", putTime);
		// 获取来源
		String resource = page.getHtml().$(".la_t_b a", "text").toString();
		page.putField("resource", resource);
		// 获取作者
		String author = page.getHtml().$(".la_t_c span", "text").toString();
		page.putField("author", author);
		// 获取参与人数
		String canyu = page.getHtml().$(".participate var", "text").toString();
		page.putField("canyu", canyu);
		// 获取正文
		String content = page.getHtml().$(".la_con p", "text").all().toString().replaceAll("\\s*", "");
		page.putField("content", content);
		// 获取分类板块
		String SubColum = page.getHtml().$(".nav_left a", "text").all().toString();
		page.putField("SubColum", SubColum);

	}

	// .setDownloader(new
	// SeleniumDownloader("C:\\Users\\zx\\Downloads\\chromedriver.exe"))
	public static void main(String[] args) {
		long startTime, endTime;
		System.out.println("开始爬取...");
		startTime = System.currentTimeMillis();
		Spider.create(new MyProcessor5()).addUrl("https://www.huanqiu.com").addPipeline(new ConsolePipeline())
				.addPipeline(new MyFilePipeline())
				.setDownloader(new SeleniumDownloader("C:\\Users\\zx\\Downloads\\chromedriver.exe")).thread(5).run();
		endTime = System.currentTimeMillis();
		System.out.println("爬取结束，耗时约" + ((endTime - startTime) / 1000) + "秒，抓取了" + count + "条记录");

	}

}
