import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.IOException;
import java.net.URL;
import java.util.Random;
import java.util.concurrent.atomic.AtomicBoolean;

public class Demo {

    /** Site root, prepended to the relative links scraped from the page. */
    private static final String BASE_URI = "https://www.9biqu.com";

    /** Connect/read timeout for every request, in milliseconds. */
    private static final int TIMEOUT_MS = 30 * 1000;

    /** Delay between chapter downloads (ms) so the site is less likely to rate-limit/ban us. */
    private static final long CRAWL_INTERVAL_MS = 10 * 1000L;

    /** Shared RNG for rotating User-Agent strings; hoisted out of the download loop. */
    private static final Random RANDOM = new Random();

    /** Pool of User-Agent strings; one is picked at random per request to look less like a bot. */
    private static final String[] USER_AGENTS = {
            "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36 OPR/37.0.2178.32",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Safari/537.36 Edge/13.10586",
            "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
            "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
            "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0)",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 BIDUBrowser/8.3 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36 Core/1.47.277.400 QQBrowser/9.4.7658.400",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 UBrowser/5.6.12150.8 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 Safari/537.36 SE 2.X MetaSr 1.0",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36 TheWorld 7",
            "Mozilla/5.0 (Windows NT 6.1; W…) Gecko/20100101 Firefox/60.0"};

    public static void main(String[] args) {
        new Demo().crawlDesc();
        // TODO: could later become an IDE plugin for reading novels; users would supply their own
        // frame/nodeWarp/node selectors parsed from the HTML. An online config center built on
        // Spring Cloud Config / Spring Cloud Bus + a git repo could serve those selectors.
    }

    /**
     * Loads the book's index page, prints its meta information, then crawls every chapter.
     * Each fatal step failure is reported and aborts the crawl instead of falling through
     * (the original code continued with a null Document and NPE'd).
     */
    private void crawlDesc() {
        Document document;
        try {
            document = Jsoup.parse(new URL(BASE_URI + "/biquge/25530/"), TIMEOUT_MS);
        } catch (IOException e) {
            // Without the index page there is nothing to crawl; bail out rather than NPE below.
            System.err.println("Failed to load book index page: " + e);
            e.printStackTrace();
            return;
        }
        printMetadata(document);
        crawlChapters(document);
    }

    /** Prints description, cover image URL, category, book name and author from OpenGraph meta tags. */
    private void printMetadata(Document document) {
        System.out.println(metaContent(document, "og:description"));
        // og:image holds a relative path on this site, so the base URI is prepended.
        System.out.println(BASE_URI + metaContent(document, "og:image"));
        System.out.println(metaContent(document, "og:book:category"));
        System.out.println(metaContent(document, "og:book:book_name"));
        System.out.println(metaContent(document, "og:book:author"));
    }

    /** Returns the {@code content} attribute of the first {@code <meta>} tag with the given property. */
    private static String metaContent(Document document, String property) {
        return document.select("meta[property=" + property + "]").attr("content");
    }

    /**
     * Walks the chapter list inside the element with id "list" and downloads each chapter's text.
     * Chapters before the element whose text contains "正文" (main text marker) are skipped.
     * A per-chapter IOException (e.g. the site's anti-crawler IP block) is reported and the loop
     * continues; an interrupt stops the crawl and restores the thread's interrupt status.
     */
    private void crawlChapters(Document document) {
        // id of the container that wraps the whole chapter list
        String frame = "list";
        // tag under the frame that wraps all chapter entries
        String nodeWarp = "dl";
        // tag of the actual chapter entries
        String node = "dd";

        Element list = document.getElementById(frame);
        if (list == null) {
            System.err.println("Chapter container #" + frame + " not found — page layout may have changed.");
            return;
        }
        Elements dl = list.select(nodeWarp);
        if (dl.isEmpty()) {
            System.err.println("No <" + nodeWarp + "> found under #" + frame + ".");
            return;
        }
        // All elements of the first <dl>; index 0 is the <dl> itself, so drop it.
        Elements allElements = dl.get(0).getAllElements();
        allElements.remove(0);

        // Becomes true once the "正文" (main text) marker element has been seen; only elements
        // after the marker are downloaded. NOTE(review): the original kept the `!el.is(node)`
        // check, which skips <dd> elements themselves — preserved as-is; verify against the
        // site's actual markup.
        boolean inMainText = false;
        for (Element el : allElements) {
            if (inMainText && !el.is(node)) {
                // el.text() is the chapter title; the <a> href is the (relative) chapter link.
                String chapterUrl = BASE_URI + el.select("a").attr("href");
                System.out.println(el.text() + "=======>" + chapterUrl);
                try {
                    // Throttle between chapter downloads.
                    Thread.sleep(CRAWL_INTERVAL_MS);
                    String userAgent = USER_AGENTS[RANDOM.nextInt(USER_AGENTS.length)];
                    Document chapter = Jsoup.connect(chapterUrl)
                            .header("referer", "https://www.baidu.com/")
                            .userAgent(userAgent)
                            .timeout(TIMEOUT_MS)
                            .get();
                    System.out.println(chapter.getElementsByClass("content_detail").text());
                } catch (IOException e) {
                    // Shown when the target site has IP-blocked us (anti-crawler measure);
                    // keep going — later chapters may still succeed.
                    System.err.println("IP封锁,爬取文章内容失败: " + chapterUrl);
                    e.printStackTrace();
                } catch (InterruptedException e) {
                    // Restore the interrupt status and stop crawling cleanly.
                    Thread.currentThread().interrupt();
                    return;
                }
            }
            // Marker check happens after processing, matching the original order:
            // the marker element itself is never downloaded.
            if (el.text().contains("正文")) {
                inMainText = true;
            }
        }
    }
}
