package com.spider.huxiu;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashSet;
import java.util.Set;

import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import com.spider.bean.Article;

/**
 * 
 * @ClassName: HuXiuSpider
 * @Description: HUXIU 新闻网爬虫
 * @author Administrator
 * @date 2018年1月30日 下午7:13:11
 */
public class HuXiuSpider {

	// Articles discovered on the home page, waiting to be fetched.
	// NOTE(review): de-duplication via HashSet only works if Article
	// overrides equals/hashCode — confirm in com.spider.bean.Article.
	private static Set<Article> articles = new HashSet<Article>();
	private static final String HUXIU_URL = "https://www.huxiu.com";
	private static final String HUXIU_ARTICLE_SAVE_PATH = "E:/WorkSpace/crawler/crawler/src/main/java/com/spider/article/";
	private static final String HUXIU_ARTICLE_LIST_SAVE_PATH = "E:/WorkSpace/crawler/crawler/src/main/java/HUXIU_ARTICLE_LIST.txt";

	/**
	 * Entry point: crawls the HuXiu home page for article metadata, then
	 * downloads each article's body text into a local file.
	 *
	 * @param args unused
	 * @throws Exception on any unrecoverable crawl failure
	 */
	public static void main(String[] args) throws Exception {
		// Step 1: collect title / author / URL for every home-page entry.
		pickHUXIU_HomePage();
		// Step 2: fetch each article page and persist its content.
		pickWebPage();
	}

	/**
	 * Scrapes the home-page info-flow ({@code .mod-info-flow>div}) and adds
	 * one {@link Article} (title, author, absolute URL) per entry to
	 * {@link #articles}.
	 *
	 * @throws Exception if the HTTP request or parsing fails
	 */
	private static void pickHUXIU_HomePage() throws Exception {
		Document document = pickUrlByHttpGet(HUXIU_URL);

		Elements elements = document.select(".mod-info-flow>div");

		// Iterating an empty Elements is a no-op, so no explicit size guard
		// is needed.
		for (Element element : elements) {
			Article article = new Article();

			// "Hot" entries ("热...") carry an extra leading div, which shifts
			// the content block from div:eq(2) to div:eq(3).
			String badge = element.select("div:eq(0)").select("div:eq(0)").text();
			String contentSel = badge.startsWith("热") ? "div:eq(3)" : "div:eq(2)";

			Elements link = element.select(contentSel).select("h2").select("a");
			String articleAuthName = element.select(contentSel).select(".author-name").text();

			String articleHref = link.attr("href"); // site-relative path
			String articleTitle = link.text();
			String articleUrl = HUXIU_URL + articleHref; // make it absolute

			article.setArticleAuthName(articleAuthName);
			article.setArticleUrl(articleUrl);
			article.setArticleTitle(articleTitle);

			articles.add(article);
		}
	}

	/**
	 * Downloads every collected article whose URL points at an article page
	 * (other home-page entries, e.g. ads or topic pages, are skipped).
	 *
	 * @throws Exception if fetching an article fails unrecoverably
	 */
	private static void pickWebPage() throws Exception {
		for (Article article : articles) {
			if (article.getArticleUrl().contains(HUXIU_URL + "/article")) {
				pickArticleUrlAnd2DB(article.getArticleUrl().trim());
			}
		}
	}

	/**
	 * Fetches a single article page, extracts author, title and body
	 * paragraphs, and saves them to a file. A failure on one article is
	 * logged (with its cause) and does not abort the whole crawl.
	 *
	 * @param articleUrl absolute URL of the article page
	 * @throws Exception never in practice — per-article errors are caught
	 */
	private static void pickArticleUrlAnd2DB(String articleUrl) throws Exception {
		try {
			Document document = pickUrlByHttpGet(articleUrl);

			Elements elements = document.select(".article-wrap");

			// ownText(): only the element's own text, excluding child nodes.
			String articleAuthName = elements.select(".author-name").get(0).ownText().trim();

			String articleImgPath = elements.get(0).select(".article-img-box>img").attr("src").trim();

			String articlePublishTime = elements.get(0).select(".article-time").text().trim();
			String articleTitle = elements.get(0).select(".t-h1").text().trim();

			Elements articleContent = elements.get(0).select(".article-content-wrap>p");

			// Sanitize the title for use as a Windows file name.
			articleTitle = articleTitle.replace(" ", "").replace("?", "？").replace("\\", "分");

			saveArticleContent2File(articleAuthName, articleTitle, articleContent);
			// TODO: persist to database
			// save();
		} catch (Exception e) {
			// FIX: the original swallowed the exception and printed only the
			// URL; report the failing URL together with the cause.
			System.err.println(articleUrl);
			e.printStackTrace();
		}
	}

	/**
	 * Appends the article's paragraphs (one per line) plus a timestamp footer
	 * to {@code <title>--<author>.txt} under {@link #HUXIU_ARTICLE_SAVE_PATH}.
	 *
	 * @param articleAuthName author name, used in the file name
	 * @param articleTitle    sanitized title, used in the file name
	 * @param articleContent  the article's paragraph elements
	 * @throws IOException if the file cannot be written
	 */
	private static void saveArticleContent2File(String articleAuthName, String articleTitle, Elements articleContent)
			throws IOException {
		File articleFile = new File(HUXIU_ARTICLE_SAVE_PATH + articleTitle + "--" + articleAuthName + ".txt");

		long begin = System.currentTimeMillis();

		// FIX: try-with-resources guarantees the writer is closed even if a
		// write fails (the original leaked it on exception). Write UTF-8
		// explicitly: the page is decoded as UTF-8, while FileWriter would
		// use the platform default charset (e.g. GBK on Windows).
		try (BufferedWriter bufferedWriter = new BufferedWriter(
				new OutputStreamWriter(new FileOutputStream(articleFile, true), StandardCharsets.UTF_8))) {

			for (Element element : articleContent) {
				bufferedWriter.write(element.text().trim());
				bufferedWriter.newLine();
				// No per-line flush: it defeats buffering; close() flushes.
			}

			// Footer: personal timestamp on the last line.
			bufferedWriter.newLine();
			bufferedWriter.write("写入时间:" + new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date()) + "@熊骑士");
		}

		long end = System.currentTimeMillis();

		System.out.println(articleTitle + "\t(写入完成,耗时:" + (end - begin) + "毫秒)");
	}

	/**
	 * Performs an HTTP GET on {@code url} with a browser User-Agent and
	 * returns the response body parsed by jsoup.
	 *
	 * @param url the page to fetch
	 * @return the parsed DOM of the page
	 * @throws IOException on connection or protocol failure
	 */
	private static Document pickUrlByHttpGet(String url) throws IOException, ClientProtocolException {
		// FIX: the original never closed the client or the response, leaking
		// a connection pool per request. try-with-resources closes both.
		try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
			// Renamed from "HttpGet" — the local shadowed the class name.
			HttpGet httpGet = new HttpGet(url);
			// A browser User-Agent is required; the site rejects bare clients.
			httpGet.addHeader("User-Agent",
					"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36");
			try (CloseableHttpResponse response = httpClient.execute(httpGet)) {
				String html = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8);
				return Jsoup.parse(html);
			}
		}
	}
}
