package com.ybox.crawler.service;

import com.ybox.common.core.exception.ServiceException;
import com.ybox.common.core.utils.StringUtils;
import com.ybox.common.core.utils.UrlSpiltUtils;
import com.ybox.common.core.utils.uuid.IdUtils;
import com.ybox.crawler.domain.Article;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import us.codecraft.webmagic.*;
import us.codecraft.webmagic.pipeline.Pipeline;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.selector.Html;
import java.io.*;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;


/**
 * Crawls a single WeChat Official Account article page, downloads the images it
 * references, and rebuilds the article as a self-contained HTML document that is
 * handed to the pipeline wrapped in an {@link Article}.
 *
 * @Author ych
 * @create 2023-2023/4/2-11:22
 */
public class WechatCrawlerAll implements PageProcessor {

    /** The article URL to crawl; also stored on the resulting {@link Article}. */
    private static String URL = "https://mp.weixin.qq.com/s/NjGK4Vc5zOCCc4Apyn5fVQ";

    /**
     * Parses the fetched page: validates that it is a WeChat article, downloads
     * every referenced image, rewrites the content HTML to point at the local
     * copies, and stores the resulting {@link Article} under the key
     * {@code "article"} for the pipeline.
     *
     * @param page the fetched page supplied by WebMagic
     * @throws ServiceException if the page does not look like a WeChat article
     */
    @Override
    public void process(Page page) {
        Html html = page.getHtml();
        Document document = html.getDocument();

        String startHtml = "<html><head>";
        String endHtml = "</body></html>";
        // The WeChat stylesheet has been uploaded to the local MinIO server beforehand.
        String css = "<link  href=\"http://127.0.0.1:9000/ybox-0/css/wechat.css?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=Z93PB1EIFWA729YLY4MJ%2F20230404%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20230404T092200Z&X-Amz-Expires=604800&X-Amz-Security-Token=eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3NLZXkiOiJaOTNQQjFFSUZXQTcyOVlMWTRNSiIsImV4cCI6MTY4MDY0MjUxMSwicGFyZW50IjoibWluaW9hZG1pbiJ9.MLc-TwaVI-KNdEYqTHYfgI8ssc9VxLgCLF_16_8Q3sMSGRe084IstpjeDYBMFJEYqzFb9jrN7EigsoBzt-j9Yg&X-Amz-SignedHeaders=host&versionId=34b964c7-06c8-4ade-83f8-a1be941add9d&X-Amz-Signature=8f24505845d9d99a36325518e83dc5f4d8a04a38e679798d8c497522e4ef33d7\" rel=\"stylesheet\" type=\"text/css\"/>";
        String endHead = "</head><body id=\"activity-detail\" class=\"zh_CN mm_appmsg  appmsg_skin_default appmsg_style_default \"><div id=\"js_article\" class=\"rich_media\">  <div id=\"js_top_ad_area\" class=\"top_banner\"></div><div class=\"rich_media_inner\"><div id=\"page-content\" class=\"rich_media_area_primary\"> <div class=\"rich_media_area_primary_inner\">";

        Elements meta = document.select("meta");
        // The WeChat article layout is fixed, so meta tags are addressed by index.
        // Guard the size first: a non-article page would otherwise throw
        // IndexOutOfBoundsException here instead of the intended ServiceException.
        if (meta.size() <= 16 || !StringUtils.equals("微信公众平台", meta.get(16).attr("content"))) {
            throw new ServiceException();
        }

        String viewPort = meta.get(4).toString();
        String charset = meta.get(1).toString();

        // Maps each original image URL to the local path it should be replaced with.
        Elements elements = document.select("img");
        HashMap<String, String> map = new HashMap<>(32);
        for (Element element : elements) {
            String src = element.attr("data-src");
            if (src == null || "".equals(src)) {
                continue;
            }
            if (map.get(src) != null && !"".equals(map.get(src))) {
                // Already downloaded this image.
                continue;
            }
            // Inline base64 images need no download; map them to themselves.
            if (src.startsWith("data:image/")) {
                map.put(src, src);
                continue;
            }
            // BUG FIX: `path` is now local to each iteration. The original declared
            // it outside the loop, so a failed download stored null (NPE later in
            // String.replace) or the stale path of a previous image.
            String path = null;
            try {
                if (UrlSpiltUtils.getUrlMap(src).isEmpty()) {
                    // No query parameters: derive the image format from the file suffix.
                    String[] split = src.split("/");
                    String suffix = split[split.length - 1].split("\\.")[1].replace("?", "");
                    path = download(src, suffix);
                } else {
                    // WeChat encodes the image format in the "wx_fmt" query parameter.
                    path = download(src, UrlSpiltUtils.getUrlMap(src).get("wx_fmt"));
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
            // On download failure, keep the original remote URL so the later
            // replacement is a harmless no-op instead of an NPE.
            map.put(src, path != null ? path : src);
        }

        Elements title = document.getElementsByClass("rich_media_title");
        Elements content = document.getElementsByClass("rich_media_content");
        Elements author = document.getElementsByClass("rich_media_meta_list");

        String contentHtml = content.toString();
        // The parsed content arrives with visibility "hidden"; flip it so the
        // article actually renders.
        contentHtml = contentHtml.replaceFirst("hidden", "visible");
        // Rewrite every remote image URL to its locally downloaded copy.
        for (Map.Entry<String, String> entry : map.entrySet()) {
            contentHtml = contentHtml.replace(entry.getKey(), entry.getValue());
        }
        contentHtml = contentHtml.replace("data-src", "src");

        String resultHtml = startHtml + css + charset + viewPort + endHead + title + author + contentHtml + endHtml;
        // Meta tag positions are fixed for WeChat article pages (validated above).
        String authors = meta.get(11).attr("content");
        String titles = meta.get(12).attr("content");
        String img = meta.get(14).attr("content");
        String description = meta.get(10).attr("content");
        String source = document.getElementById("js_name").text();

        Article article = new Article(titles, authors, URL, resultHtml, img, source, description);
        // Hand the processed article to the pipeline.
        page.putField("article", article);

        System.out.println("爬取文章完成！！");
    }

    /** Crawler configuration: UTF-8 pages, 10s timeout, 5 retries with 10s back-off. */
    private Site site = Site.me()
            .setCharset("utf8")
            .setTimeOut(10 * 1000)
            .setRetrySleepTime(10000)
            .setRetryTimes(5);

    @Override
    public Site getSite() {
        return site;
    }

    /**
     * Downloads one image to the local image directory under a fresh UUID name.
     *
     * @param urlString absolute URL of the image to fetch
     * @param suffix    file suffix (image format) to store it under
     * @return the relative path ({@code ./img/<uuid>.<suffix>}) referenced by the rewritten HTML
     * @throws Exception if the connection or the file copy fails
     */
    public static String download(String urlString, String suffix) throws Exception {
        URL url = new URL(urlString);
        URLConnection con = url.openConnection();
        String id = IdUtils.fastUUID();
        File file = new File("D:\\java\\img\\" + id + "." + suffix);
        // try-with-resources guarantees both streams are closed even when the copy
        // fails mid-way (the original leaked them on any exception). The append
        // flag is also dropped: the file name is a fresh UUID, so truncation is correct.
        try (InputStream is = con.getInputStream();
             FileOutputStream os = new FileOutputStream(file)) {
            byte[] buffer = new byte[8192];
            int len;
            while ((len = is.read(buffer)) != -1) {
                os.write(buffer, 0, len);
            }
        }
        // The generated HTML references images relative to the page location.
        return "./img/" + id + "." + suffix;
    }

    /**
     * Runs the crawler against {@link #URL} and writes the rebuilt article HTML
     * to {@code D:/java/weixin.html}.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        Spider.create(new WechatCrawlerAll())
                // The single article page to crawl.
                .addUrl(URL)
                .addPipeline(new Pipeline() {
                    /** Destination file for the formatted article HTML. */
                    private String path = "D:/java/weixin.html";

                    @Override
                    public void process(ResultItems resultItems, Task task) {
                        Article article = resultItems.get("article");
                        if (article == null) {
                            return;
                        }
                        try {
                            // Pretty-print the generated HTML before saving.
                            Document doc = Jsoup.parse(article.getResultHtml());
                            doc.outputSettings().prettyPrint(true);
                            String formattedHtml = doc.html();
                            // Write as UTF-8 explicitly: the original used the platform
                            // default charset, which corrupts the Chinese text on
                            // non-UTF-8 systems. try-with-resources closes the stream
                            // even if the write fails (FileOutputStream also creates
                            // the file, so the createNewFile() call was redundant).
                            try (FileOutputStream out = new FileOutputStream(path)) {
                                out.write(formattedHtml.getBytes(StandardCharsets.UTF_8));
                            }
                        } catch (IOException e) {
                            throw new RuntimeException(e);
                        }
                    }
                })
                .run();
    }
}
