package com.zouxian.processor;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;

import com.zouxian.pojo.News;

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;

/**
 * WebMagic page processor that crawls news.163.com: list pages feed matching
 * article links back into the queue; article pages are scraped (url, title,
 * date, source, section) and persisted as UTF-8 text files under D:\news\.
 */
public class MyProcessor2 implements PageProcessor {

	// Crawler configuration: retry failed downloads 3 times, 100 ms between requests.
	private Site site = Site.me().setRetryTimes(3).setSleepTime(100);

	// Article URL shape, e.g. https://news.163.com/18/0918/09/DRVQFG2M0001875O.html
	// Dots are escaped and the character class no longer accidentally admits spaces
	// (the original "[A-Z 0-9 -]" contained literal space characters).
	private static final String ARTICLE_URL_REGEX =
			"https://news\\.163\\.com/[0-9]{2}/[0-9]{4}/[0-9]{2}/[A-Z0-9-]+\\.html";

	// Publication timestamp, e.g. 2018-09-18 10:28:03.
	// Compiled once as a constant instead of on every processed page.
	private static final Pattern DATE_PATTERN =
			Pattern.compile("[0-9]{4}-[0-9]{1,2}-[0-9]{1,2} [0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}");

	// Number of articles persisted. AtomicInteger because the spider runs
	// 5 worker threads (plain "static int count++" raced).
	private static final AtomicInteger count = new AtomicInteger();

	public Site getSite() {
		return site;
	}

	/**
	 * Called once per downloaded page.
	 *
	 * @param page the downloaded page; non-article pages only contribute links,
	 *             article pages are scraped and written to disk
	 */
	public void process(Page page) {
		boolean isArticle = page.getUrl().regex(ARTICLE_URL_REGEX).match();
		if (!isArticle) {
			// List/landing page: enqueue every link that looks like an article URL.
			page.addTargetRequests(page.getHtml().links().regex(ARTICLE_URL_REGEX).all());
			return;
		}

		String url = page.getUrl().get();
		String title = page.getHtml().xpath("//*[@id=\"epContentLeft\"]/h1/text()").get();
		if (title == null) {
			// Not a standard article layout; skip it entirely. The original fell
			// through here and later NPE'd on data.replaceAll(...) for such pages.
			page.setSkip(true);
			return;
		}

		// Raw "date + source" line, e.g. "2018-09-18 10:28:03　来源: ..."
		String dateAndSource = page.getHtml()
				.xpath("//*[@id=\"epContentLeft\"]/div[@class='post_time_source']/text()").get();
		// Section/channel the article belongs to.
		String genre = page.getHtml().xpath("//*[@class='clearfix']/div/a[3]/text()").get();
		// Originating site name.
		String resource = page.getHtml()
				.xpath("//*[@id=\"epContentLeft\"]/div[@class='post_time_source']/a/text()").get();

		// Extract the timestamp; date formats vary across pages, so it may be absent.
		String data = null;
		if (dateAndSource != null) {
			Matcher matcher = DATE_PATTERN.matcher(dateAndSource);
			if (matcher.find()) {
				data = matcher.group();
			}
		}

		System.out.println("新闻链接：" + url);
		System.out.println("新闻标题：" + title);
		System.out.println("新闻发布日期：" + data);
		System.out.println("新闻来源:" + resource);
		System.out.println("所属板块：" + genre);

		// File name: t<yyyyMMdd>_<6 random digits>. Guard the null/short-date case —
		// the original threw NullPointerException here whenever the regex found no date.
		String datePart = (data != null && data.length() >= 10)
				? data.replaceAll("-", "").substring(0, 8)
				: "unknown";
		String fileName = "t" + datePart + "_" + (new Random().nextInt(900000) + 100000);

		File dir = new File("D:\\news");
		dir.mkdirs(); // ensure the output directory exists before writing
		// try-with-resources so the writer is closed even if a write fails
		// (the original leaked the stream on exception); explicit UTF-8 instead
		// of the platform default charset.
		try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
				new FileOutputStream(new File(dir, fileName + ".txt")), StandardCharsets.UTF_8))) {
			bw.write("url:" + url);
			bw.newLine();
			bw.write("title:" + title);
			bw.newLine();
			bw.write("data:" + data);
			bw.newLine();
			bw.write("resource:" + resource);
			bw.newLine();
			bw.write("genre:" + genre); // was "genre;" — typo in the field separator
			// Count only records actually persisted (increment was previously
			// reached even when the write failed).
			count.incrementAndGet();
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	public static void main(String[] args) {
		System.out.println("开始爬取...");
		long startTime = System.currentTimeMillis();
		Spider.create(new MyProcessor2()).addUrl("https://news.163.com/").thread(5).run();
		long endTime = System.currentTimeMillis();
		System.out.println("爬取结束，耗时约" + ((endTime - startTime) / 1000)
				+ "秒，抓取了" + count.get() + "条记录");
	}

}
