package com.example.zselasticsearchsf.controller;


import com.gargoylesoftware.htmlunit.BrowserVersion;
import com.gargoylesoftware.htmlunit.NicelyResynchronizingAjaxController;
import com.gargoylesoftware.htmlunit.WebClient;
import com.gargoylesoftware.htmlunit.html.*;
import lombok.extern.slf4j.Slf4j;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

import java.io.IOException;
import java.util.List;

/**
 * <p>HtmlUnit-based crawler, originally written for the China judicial case
 * library (alk.12348.gov.cn); the current target URL points at qcc.com.</p>
 *
 * @author Steven
 * @since 2021/10/20
 */
@RestController
@RequestMapping("zondar/reptile")
@Slf4j
public class Reptile22 {

    /** Base URL of the crawl target (qcc.com company search). */
    private static final String TARGET_URL = "https://www.qcc.com";

    /** Pause applied after actions that trigger asynchronous page updates, in ms. */
    private static final long RENDER_WAIT_MS = 5000L;

    /** Upper bound on background-JavaScript execution time per page, in ms. */
    private static final long BACKGROUND_JS_TIMEOUT_MS = 30_000L;

    /**
     * Crawls the target site's search results with a simulated Chrome browser.
     *
     * @param keywords search keywords; defaults to the empty string when absent
     *                 (a {@code null} value cannot be typed into the search box)
     * @param pages    requested page count — currently unused; the total page
     *                 count is read from the result page's pagination instead
     * @throws IOException          if a page cannot be fetched
     * @throws InterruptedException if one of the fixed render waits is interrupted
     */
    @GetMapping("/qcc")
    public void htmlUnitSFW(@RequestParam(value = "keywords", required = false, defaultValue = "") String keywords,
                            @RequestParam(value = "pages", required = false) Integer pages)
            throws IOException, InterruptedException {

        // try-with-resources: the original leaked the WebClient (never closed).
        try (final WebClient webClient = new WebClient(BrowserVersion.CHROME)) {
            webClient.getOptions().setThrowExceptionOnScriptError(false);       // tolerate JS errors
            webClient.getOptions().setThrowExceptionOnFailingStatusCode(false); // tolerate non-200 responses
            webClient.getOptions().setActiveXNative(false);
            webClient.getOptions().setCssEnabled(false);                        // page is never rendered visually
            webClient.getOptions().setJavaScriptEnabled(true);                  // required: results are built by JS
            webClient.setAjaxController(new NicelyResynchronizingAjaxController()); // required: AJAX support
            webClient.getOptions().setUseInsecureSSL(true);

            HtmlPage page = webClient.getPage(TARGET_URL);

            // Type the keywords into the search box.
            HtmlInput searchBox = page.getHtmlElementById("searchKey");
            searchBox.setValueAttribute(keywords);
            Thread.sleep(RENDER_WAIT_MS); // content does not appear without this pause

            // Throws ElementNotFoundException when the header is absent; the
            // original kept this lookup (result unused) — retained as a sanity check.
            page.getHtmlElementById("page-header");
            Thread.sleep(RENDER_WAIT_MS); // content does not appear without this pause

            DomElement lastCompany = page.getHtmlElementById("company-list").getLastElementChild();
            log.debug("last element of company-list: {}", lastCompany);

            // NOTE(review): "searchResultDiv" and "pagenav" belong to the previous
            // target site (alk.12348.gov.cn); they likely do not exist on qcc.com —
            // confirm against the live page.
            List<HtmlAnchor> pageAnchors =
                    page.getHtmlElementById("searchResultDiv").getByXPath("//div[@class='pagenav']/a");
            log.debug("pagination anchors: {}", pageAnchors);

            // The second-to-last anchor holds the total page count.
            Document anchorDoc = Jsoup.parse(pageAnchors.get(pageAnchors.size() - 2).asXml());
            int totalNum = Integer.parseInt(anchorDoc.getElementsByTag("a").text());
            log.info(">>>>>>>>>>>>>>搜索关键词:【{}】，搜索结果：【{}】页", keywords, totalNum);

            for (int j = 1; j <= totalNum; j++) {
                // Pagination is driven by the site's own JS function.
                page.executeJavaScript("searchPageNew('" + j + "')");
                Thread.sleep(RENDER_WAIT_MS); // content does not appear without this pause
                log.info("第>>>>>>>>>>>>>>>【{}】页", j);

                // Block until asynchronous JS has settled before scraping.
                webClient.waitForBackgroundJavaScript(BACKGROUND_JS_TIMEOUT_MS);

                scrapeResultPage(page.asXml());
            }
        }
    }

    /**
     * Parses one rendered result page and logs its pagination links and rows.
     * Per-row field extraction is not implemented yet; the previous
     * (commented-out) implementation targeted alk.12348.gov.cn.
     *
     * @param pageXml the fully rendered page serialized as XML
     */
    private void scrapeResultPage(String pageXml) {
        Document document = Jsoup.parse(pageXml);

        Element resultDiv = document.getElementById("searchResultDiv");
        if (resultDiv == null) {
            // Guard: the original dereferenced this without a null check and
            // would throw NullPointerException when the element is absent.
            log.warn("searchResultDiv not found on page — skipping");
            return;
        }

        // Pagination links.
        for (Element nav : resultDiv.getElementsByAttributeValue("class", "pagenav")) {
            log.debug("pagenav text: {}", nav.getElementsByTag("a").text());
            log.debug("pagenav href: {}", nav.getElementsByTag("a").attr("href"));
        }

        // Result rows live in a nested tbody; guard each lookup against absence.
        Element outerBody = resultDiv.getElementsByTag("tbody").first();
        Element innerBody = outerBody == null ? null : outerBody.getElementsByTag("tbody").first();
        if (innerBody == null) {
            log.warn("result table body not found — skipping");
            return;
        }

        for (Element row : innerBody.getElementsByTag("tr")) {
            List<Element> cells = row.getElementsByTag("td");
            // TODO: extract title / detail URL from the cells and persist them.
            log.debug("row with {} cells", cells.size());
        }
    }
}
