package base.httpclient.demo02;

import org.apache.http.HttpEntity;
import org.apache.http.HttpStatus;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.utils.HttpClientUtils;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

/**
 * Crawls content from the China Taxation News website with Jsoup.
 * http://www.ctaxnews.net.cn/paper/pc/layout/202109/15/node_01.html (free)
 * http://www.ctaxnews.net.cn/paper/pc/layout/202109/15/node_05.html (paid)
 */
public class Client {

    /** Collected article URLs. Currently unused; kept for interface compatibility. */
    ArrayList<String> urlList = new ArrayList<>();
    /** Maps article title -> absolute article URL; populated by {@link #testCase3}. */
    static HashMap<String, String> map = new HashMap<>(16);

    public static void main(String[] args) {
//        System.out.println(testCase2("http://www.ctaxnews.net.cn/paper/pc/layout/202109/15/node_01.html"));
        testCase3("http://www.ctaxnews.net.cn/paper/pc/layout/202109/15/node_01.html");
//        testCase4("http://www.ctaxnews.net.cn/paper/pc/layout/202109/15/node_05.html");
    }


    /**
     * Fetches the raw HTML of the given page with Apache HttpClient.
     * Proxy IP lookup: http://www.ip3366.net/
     *
     * @param url page to fetch; when {@code null}, a hard-coded demo URL is used
     * @return the response body decoded as UTF-8 (returned even for non-200
     *         statuses, after printing a notice), or {@code null} on I/O failure
     */
    public static String testCase2(String url) {
        // Build the GET request -- like typing an address into a browser's address bar.
        // (https://www.tuicool.com/ rejects obvious crawlers, hence the UA spoof below.)
        HttpGet request = new HttpGet(url == null ? "https://www.tuicool.com/" : url);

        // Disguise the crawler as a regular browser.
        request.setHeader("User-Agent", "Mozilla/5.0");
//        HttpHost proxy = new HttpHost("175.7.199.218", 3256); // route via a proxy IP; swap it out if it stops working
//        RequestConfig config = RequestConfig.custom().setProxy(proxy).build();
//        request.setConfig(config);

        // try-with-resources closes the response first, then the client,
        // replacing the manual closeQuietly() calls of the original.
        try (CloseableHttpClient httpClient = HttpClients.createDefault();
             CloseableHttpResponse response = httpClient.execute(request)) {

            if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
                // Non-200 (e.g. 404): report it, but still return the body below
                // so the caller can inspect the error page.
                System.out.println("返回状态不是200");
            }
            HttpEntity httpEntity = response.getEntity();
            return EntityUtils.toString(httpEntity, "utf-8");
        } catch (IOException e) {
            // Covers ClientProtocolException too (it is an IOException subclass).
            e.printStackTrace();
        }
        return null;
    }

    /**
     * Parses the front-page HTML fetched by {@link #testCase2}, prints each
     * article's title/link/summary, and records title -> absolute URL in {@link #map}.
     * Unlike the original, this tolerates a failed fetch or a changed page
     * layout instead of throwing a NullPointerException.
     */
    public static void testCase3(String url) {
        String html = testCase2(url);
        if (html == null) {
            System.out.println("Failed to fetch page: " + url);
            return;
        }
        Document document = Jsoup.parse(html);
        Element scroLeft = document.getElementById("ScroLeft");
        if (scroLeft == null) {
            System.out.println("Element #ScroLeft not found; page layout may have changed: " + url);
            return;
        }
        Elements newslist = scroLeft.getElementsByClass("newslist");
        if (newslist.isEmpty()) {
            System.out.println("No .newslist element found; page layout may have changed: " + url);
            return;
        }
        // Iterate over the article list entries.
        for (Element item : newslist.get(0).getElementsByTag("li")) {
            Elements h3_a = item.select("h3 a");
            System.out.println("标题：" + h3_a.text());
            System.out.println("超链接：" + h3_a.attr("href"));
            // Rewrite the relative link ("../../../...") into an absolute URL.
            String href = h3_a.attr("href").replace("../../../", "http://www.ctaxnews.net.cn/paper/pc/");
            map.put(h3_a.text(), href);
            Elements p = item.select("p");
            System.out.println("简短内容：" + p.text());
            System.out.println("==========================================");
        }
    }

    /**
     * Crawls every article URL collected by {@link #testCase3} and writes each
     * article's text to a file under files/jsoup/.
     * Fixes over the original: the output directory is created first (the
     * original threw FileNotFoundException when files/jsoup did not exist),
     * path-illegal characters in titles are sanitized, the text is written as
     * UTF-8 instead of the platform default charset, and the unused
     * {@code File} local is gone.
     */
    public static void testCase4(String url) {
        testCase3(url);
        File outputDir = new File("files/jsoup");
        if (!outputDir.exists() && !outputDir.mkdirs()) {
            System.out.println("Could not create output directory: " + outputDir);
            return;
        }
        Set<Map.Entry<String, String>> entries = map.entrySet();
        for (Map.Entry<String, String> entry : entries) {
            // Article title.
            String key = entry.getKey();
            // Article URL.
            String value = entry.getValue();
            System.out.println(key + "-------------------------->" + value);

            // Titles may contain characters that are illegal in file names.
            String safeTitle = key.replaceAll("[\\\\/:*?\"<>|]", "_");
            // NOTE(review): this writes plain text under a .docx extension; Word
            // will not open it as a real document. Consider .txt or a docx library.
            String fileName = "files/jsoup/" + safeTitle + ".docx";
            try (FileOutputStream fos = new FileOutputStream(fileName);
                 BufferedOutputStream bos = new BufferedOutputStream(fos);
                 PrintStream ps = new PrintStream(bos, false, "UTF-8")) {

                // Fetch and extract the article body text.
                String txt = getTxt(value);
                ps.print(txt);

            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

    /**
     * Fetches an article page and returns the text of its first
     * "newsconright1" element.
     *
     * @return the article text, or an empty string when the fetch failed or
     *         the element is missing (the original threw NPE / IndexOutOfBounds)
     */
    public static String getTxt(String url) {
        String html = testCase2(url);
        if (html == null) {
            return "";
        }
        Elements content = Jsoup.parse(html).getElementsByClass("newsconright1");
        return content.isEmpty() ? "" : content.get(0).text();
    }

}
