package com.dongman.scrapy;

import cn.wanghaomiao.xpath.model.JXDocument;
import cn.wanghaomiao.xpath.model.JXNode;
import com.dongman.model.Constants;
import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

import java.util.ArrayList;
import java.util.List;

public class CrawAllLink {

    /** Utility class — static methods only; not meant to be instantiated. */
    private CrawAllLink() {
    }

    /**
     * Scrapes a single listing page and collects the absolute links of every
     * comic title found on it.
     *
     * <p>Best-effort: any failure (connection, parse) is logged and an empty or
     * partial list is returned rather than propagating the exception, so one bad
     * page does not abort a whole crawl.
     *
     * @param url the listing-page URL to fetch
     * @return absolute comic links ({@code Constants.domain} + relative href);
     *         possibly empty, never {@code null}
     */
    public static List<String> getOnePageLinks(String url) {
        List<String> onePageLinks = new ArrayList<>();

        // Build the request with a spoofed User-Agent so the site treats us
        // like a real browser.
        Connection conn = Jsoup.connect(url);
        conn.header("User-Agent", CrawlUtil.getUserAgent());

        try {
            // connectOneMoreTimes retries the request up to 3 times before giving up.
            Document document = CrawlUtil.connectOneMoreTimes(conn, 3);

            // Wrap the jsoup Document so it can be queried with XPath.
            JXDocument jdoc = new JXDocument(document);

            // Each comic title is an <a> inside <p class="comic__title">;
            // its @href is a site-relative path, so prefix the domain.
            List<JXNode> elNodes = jdoc.selN("//p[@class=comic__title]/a");
            for (JXNode node : elNodes) {
                onePageLinks.add(Constants.domain + node.sel("/@href").get(0).getTextVal());
            }
        } catch (Exception e) {
            // Deliberate best-effort: log and return whatever was collected.
            e.printStackTrace();
        }
        return onePageLinks;
    }

    /**
     * Crawls every page of the listing at {@code url} and returns all comic
     * links, pausing {@code Constants.sleepSeconds} ms between pages to avoid
     * hammering the server.
     *
     * @param url base listing URL; page N is fetched from {@code url + "/page/" + N}
     * @return all links from page 1 through the last page reported by {@link #getAllPage}
     */
    public static List<String> getAllLinksByUrl(String url) {
        // Determine how many pages the listing spans.
        int totalPage = getAllPage(url);

        List<String> allLinks = new ArrayList<>();
        for (int page = 1; page <= totalPage; page++) {
            System.out.println("page::" + page);
            try {
                // Throttle between page requests.
                Thread.sleep(Constants.sleepSeconds);
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers can observe the interruption.
                Thread.currentThread().interrupt();
                e.printStackTrace();
            }
            String pageUrl = url + "/page/" + page;
            allLinks.addAll(getOnePageLinks(pageUrl));
        }
        return allLinks;
    }

    /**
     * Fetches the listing page and extracts the total page count from the
     * "last page" (末页) pagination anchor, whose href ends in the page number.
     *
     * @param url the listing URL to inspect
     * @return the total number of pages, or {@code 1} if the anchor is absent
     *         (single-page listing) or anything fails
     */
    public static int getAllPage(String url) {
        Connection conn = Jsoup.connect(url);
        conn.header("User-Agent", CrawlUtil.getUserAgent());

        int totalPage = 1;
        try {
            Document document = CrawlUtil.connectOneMoreTimes(conn, 3);
            JXDocument jdoc = new JXDocument(document);
            try {
                // The last-page link looks like ".../page/<N>"; take the final
                // path segment as the page count.
                String[] segments =
                        jdoc.selN("//a[text()=末页]/@href").get(0).getTextVal().split("/");
                totalPage = Integer.parseInt(segments[segments.length - 1]);
            } catch (Exception ignored) {
                // Intentional fallback: no 末页 anchor (or malformed href) means
                // the listing fits on a single page — keep totalPage = 1.
            }
        } catch (Exception e) {
            // Connection/parse failure: fall back to a single page.
            e.printStackTrace();
        }
        return totalPage;
    }

    public static void main(String[] args) {
        getAllLinksByUrl(Constants.all_link);
    }
}
