package com.yinxin.spiders;

import com.yinxin.util.HttpUtil;
import com.yinxin.util.JDBCUtils;
import org.apache.http.HttpEntity;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

/**
 * @Author: YinXin
 * @Date: 2021/10/11 10:26
 * @Version: 1.0
 * @description: Crawler implementation — scrapes the required information
 *               (author + comment text per post) from a Baidu Tieba thread
 *               page and inserts each pair into the `contain` table.
 */
@SuppressWarnings("all")
public class spiderForJsonp {
    /**
     * Fetches the thread page, parses every post block with Jsoup, and writes
     * one row (author, comment) per non-empty comment into the database.
     *
     * @param args unused
     * @throws IOException  if the HTTP fetch or response handling fails
     * @throws SQLException if preparing or executing the INSERT fails
     */
    public static void main(String[] args) throws IOException, SQLException {
        String url = "https://tieba.baidu.com/p/5812560041";
        CloseableHttpResponse response = HttpUtil.getResponse(url);
        // Database connection (project-provided pool/util).
        Connection conn = JDBCUtils.getConnection();
        PreparedStatement pstmt = null;
        try {
            if (response.getStatusLine().getStatusCode() == 200) {
                // Read the page body as UTF-8 text.
                HttpEntity entity = response.getEntity();
                String res = EntityUtils.toString(entity, "utf-8");
                // Parse with Jsoup; CSS-style selectors locate each post container.
                Document doc = Jsoup.parse(res);
                Elements elements = doc.select("div[class~=l_post j_l_post l_post_bright.*]");
                if (!elements.isEmpty()) {
                    // FIX: prepare the statement ONCE, not on every loop iteration
                    // (the old code leaked a new PreparedStatement per row).
                    String sql = "INSERT INTO contain (id,author,pinlun) VALUES (NULL,?,?)";
                    pstmt = conn.prepareStatement(sql);
                    for (int i = 0; i < elements.size(); i++) {
                        // FIX: "第"+i+1 concatenated left-to-right ("0"+"1" -> "01");
                        // parenthesize to print the 1-based index.
                        System.out.println("第" + (i + 1) + "条评论！");
                        Element element = elements.get(i);
                        // Author nickname lives in the d_name list item.
                        Elements author = element.select("li[class=d_name]");
                        // FIX: replace("\\s","") matched the literal characters "\s"
                        // (String.replace is not regex) and did nothing; replaceAll
                        // with a character class strips \n, \r and \t in one pass.
                        String authortext = author.get(0).text().replaceAll("[\\n\\r\\t]", "");
                        System.out.println(authortext);
                        // Comment body: div whose id is post_content_<digits>.
                        Elements pinlun = element.select("div[id~=post_content_[0-9]*]");
                        String pinluntext = pinlun.get(0).text().replaceAll("[\\n\\r\\t]", "");
                        // FIX: was pinluntext=="" (reference compare, always false);
                        // isEmpty() actually detects the emoji-only/empty comments.
                        if (pinluntext.isEmpty()) {
                            System.out.println("评论为空不写入数据库！");
                            continue;
                        }
                        System.out.println(pinluntext);
                        System.out.println("===============================");
                        // Insert via bound parameters (no SQL injection risk).
                        pstmt.setString(1, authortext);
                        pstmt.setString(2, pinluntext);
                        pstmt.execute();
                    }
                } else {
                    System.out.println("《《《《《 *** 没有需要的信息 *** 》》》》》");
                }
            } else {
                System.out.println("服务器连接失败！");
            }
        } finally {
            // FIX: cleanup now runs even on exception, and the connection is no
            // longer closed twice (old code called conn.close() AND JDBCUtils.close).
            JDBCUtils.close(conn, pstmt);
            response.close();
            HttpUtil.close();
        }
    }
}
