package com.ylwj;

import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;

import org.apache.http.HttpEntity;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
 * 使用httpclient实现的一个简单爬虫
 * 
 * @author wu
 *
 */
public class Crawler {
	// Domain used to resolve relative links; defaults to the Qingdao Bureau of Commerce site.
	private String domain = "http://www.qdbofcom.gov.cn";
	// First listing page of the section being crawled.
	private String startUrl = domain + "/n32207979/n32207980";
	// Shared HttpClient instance; released in run() when crawling finishes.
	private CloseableHttpClient httpClient = HttpClients.createDefault();
	// Number of paginated index pages: index.html plus index_2.html .. index_N.html.
	private static final int INDEX_PAGE_COUNT = 30;

	/**
	 * Crawls every article linked from the paginated index pages and returns
	 * their rich-text bodies (HTML fragments with image URLs made absolute).
	 *
	 * @return list of article HTML fragments, one per article page
	 * @throws ClientProtocolException on HTTP protocol errors
	 * @throws IOException             on network or stream failures
	 */
	public List<String> run() throws ClientProtocolException, IOException {
		List<String> details = new ArrayList<String>();
		try {
			List<String> urlList = getUrlList(getIndexUrlList());
			for (String url : urlList) {
				String detail = getContentDetail(url);
				// Pages without a "con-detail" section are skipped rather than aborting the crawl.
				if (detail != null) {
					details.add(detail);
				}
			}
		} finally {
			// Release the client even if a request fails partway through.
			httpClient.close();
		}
		return details;
	}

	/**
	 * Builds the list of paginated index-page URLs: the first page is
	 * index.html, subsequent pages are index_2.html .. index_30.html.
	 *
	 * @return ordered list of index-page URLs
	 */
	public List<String> getIndexUrlList() {
		List<String> indexUrlList = new ArrayList<String>(INDEX_PAGE_COUNT);
		indexUrlList.add(startUrl + "/index.html");
		for (int i = 2; i <= INDEX_PAGE_COUNT; i++) {
			indexUrlList.add(startUrl + "/index_" + i + ".html");
		}
		return indexUrlList;
	}

	/**
	 * Fetches each index page and extracts the article links found inside the
	 * element with class "con-list". Only site-relative links (starting with
	 * "/") are kept and resolved against {@code domain}; links to other hosts
	 * are ignored (e.g. external video pages occasionally appear in the list).
	 *
	 * @param indexUrlList index-page URLs to scan
	 * @return absolute article URLs
	 * @throws ClientProtocolException on HTTP protocol errors
	 * @throws IOException             on network or stream failures
	 */
	public List<String> getUrlList(List<String> indexUrlList) throws ClientProtocolException, IOException {
		List<String> urlList = new ArrayList<String>();
		for (String indexUrl : indexUrlList) {
			HttpGet httpGet = new HttpGet(indexUrl);
			// try-with-resources guarantees the response is closed on all paths.
			try (CloseableHttpResponse response = httpClient.execute(httpGet)) {
				HttpEntity entity = response.getEntity();
				String content = EntityUtils.toString(entity, "utf-8");
				Document doc = Jsoup.parse(content);
				Elements containers = doc.getElementsByClass("con-list");
				if (containers.isEmpty()) {
					continue; // index page without the expected list markup
				}
				for (Element element : containers.get(0).getElementsByTag("a")) {
					String url = element.attr("href");
					// Only site-relative paths are resolved; absolute links to
					// other hosts are deliberately dropped.
					if (url.startsWith("/")) {
						urlList.add(domain + url);
					}
				}
			}
		}
		return urlList;
	}

	/**
	 * Fetches one article page and returns its rich-text body (the element
	 * with class "con-detail"), with relative image paths rewritten to
	 * absolute URLs.
	 *
	 * @param url absolute article URL
	 * @return article HTML fragment, or {@code null} when the page has no
	 *         "con-detail" element
	 * @throws ClientProtocolException on HTTP protocol errors
	 * @throws IOException             on network or stream failures
	 */
	public String getContentDetail(String url) throws ClientProtocolException, IOException {
		HttpGet httpGet = new HttpGet(url);
		try (CloseableHttpResponse response = httpClient.execute(httpGet)) {
			HttpEntity entity = response.getEntity();
			String html = EntityUtils.toString(entity, "utf-8");
			Document doc = Jsoup.parse(html);
			Elements details = doc.getElementsByClass("con-detail");
			// Some pages lack the detail container; previously this threw
			// IndexOutOfBoundsException. Signal the caller with null instead.
			if (details.isEmpty()) {
				return null;
			}
			return imgUrlConvert(details.get(0).html());
		}
	}

	/**
	 * Validates the format of a fetched article fragment.
	 *
	 * @param contentDetail article HTML fragment to check
	 * @return always {@code true} for now — validation not yet implemented
	 */
	public boolean contentCheck(String contentDetail) {
		// TODO: implement real format validation.
		return true;
	}

	/**
	 * Rewrites relative image paths in an article fragment to absolute URLs.
	 * Only {@code <img src="/...">} occurrences are rewritten; images that
	 * already carry an absolute URL are left untouched (the old blanket
	 * replacement corrupted absolute URLs by prefixing the domain onto them).
	 *
	 * @param contentDetail article HTML fragment
	 * @return fragment with site-relative image URLs made absolute
	 */
	public String imgUrlConvert(String contentDetail) {
		// Literal replace (not regex) — only relative paths starting with "/".
		return contentDetail.replace("<img src=\"/", "<img src=\"" + domain + "/");
	}

	/**
	 * Wraps an article fragment in a minimal UTF-8 HTML page and writes it to
	 * E://test/&lt;filename&gt;.
	 *
	 * @param contentDetail article HTML fragment to embed
	 * @param filename      output file name (relative to E://test/)
	 * @throws Exception on any I/O failure
	 */
	public void writeHTML(String contentDetail, String filename) throws Exception {
		// TODO: derive the <title> from the article itself.
		StringBuilder stringHtml = new StringBuilder();
		stringHtml.append("<html><head>");
		stringHtml.append("<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">");
		stringHtml.append("<title>测试</title>");
		stringHtml.append("</head>");
		stringHtml.append("<body>");
		stringHtml.append(contentDetail);
		stringHtml.append("</body></html>");
		// Write as UTF-8 to match the declared meta charset (the old code used
		// the platform default encoding), and close the stream on all paths.
		try (PrintStream printStream = new PrintStream(new FileOutputStream("E://test/" + filename), false, "utf-8")) {
			printStream.println(stringHtml.toString());
		}
	}

	/**
	 * Entry point: crawls all articles and writes each one to a numbered
	 * HTML file (1.html, 2.html, ...).
	 */
	public static void main(String[] args) throws Exception {
		Crawler crawler = new Crawler();
		List<String> contentDetailList = crawler.run();
		int index = 1;
		for (String contentDetail : contentDetailList) {
			crawler.writeHTML(contentDetail, index + ".html");
			index++;
		}
	}
}
