package com.gpt.article.service.impl;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.UUID;

import com.gpt.article.domain.*;
import com.gpt.article.service.IGptSpiderArticleContentService;
import com.gpt.article.service.IGptSpiderArticleService;
import com.gpt.article.service.IGptSysFileService;
import com.ruoyi.common.config.RuoYiConfig;
import com.ruoyi.common.core.domain.entity.GptSpiderClass;
import com.ruoyi.common.utils.DateUtils;
import com.ruoyi.common.utils.StringUtils;
import com.ruoyi.common.utils.file.FileUtils;
import com.ruoyi.common.utils.sign.Md5Utils;
import com.zhucx.utils.JsoupUtil;
import org.apache.commons.lang3.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.gpt.article.mapper.GptSpiderDiyMapper;
import com.gpt.article.service.IGptSpiderDiyService;

import com.zhucx.Examle;
import com.zhucx.ZhuCrawler;
import com.zhucx.loader.strategy.PlaywrightPageLoader;
import com.zhucx.parser.PageParser;
import com.zhucx.parser.htmlsucker.Article;
import com.zhucx.parser.htmlsucker.HtmlSucker;
import com.zhucx.utils.DownloadURLFile;
import com.zhucx.utils.JsoupUtil;
import org.apache.commons.lang3.ObjectUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
 * Service implementation for spider (crawl) configurations: CRUD on the
 * configuration table plus the crawl pipeline itself
 * (list pages -> article pages -> images -> persistence).
 *
 * Spider status codes as used by this class (meaning inferred from the flow
 * below — confirm against the project's dictionary data):
 * "1" = crawling, "2" = finished, "3" = queued, "4" = no article links found.
 *
 * @author zhuzi
 * @date 2023-10-24
 */
@Service
public class GptSpiderDiyServiceImpl implements IGptSpiderDiyService
{
    @Autowired
    private GptSpiderDiyMapper gptSpiderDiyMapper;

    protected final Logger logger = LoggerFactory.getLogger(this.getClass());

    @Autowired
    private IGptSpiderArticleService gptSpiderArticleService;

    @Autowired
    private IGptSpiderArticleContentService gptSpiderArticleContentService;

    @Autowired
    private IGptSysFileService gptSysFileService;

    /**
     * Look up a single spider configuration.
     *
     * @param id configuration primary key
     * @return the configuration, or null when absent
     */
    @Override
    public GptSpiderDiy selectGptSpiderDiyById(Long id)
    {
        return gptSpiderDiyMapper.selectGptSpiderDiyById(id);
    }

    /**
     * List spider configurations matching the given filter object.
     *
     * @param gptSpiderDiy filter (non-null fields are used as criteria by the mapper)
     * @return matching configurations
     */
    @Override
    public List<GptSpiderDiy> selectGptSpiderDiyList(GptSpiderDiy gptSpiderDiy)
    {
        return gptSpiderDiyMapper.selectGptSpiderDiyList(gptSpiderDiy);
    }

    /**
     * Insert a new spider configuration.
     *
     * @param gptSpiderDiy configuration to insert
     * @return affected row count
     */
    @Override
    public int insertGptSpiderDiy(GptSpiderDiy gptSpiderDiy)
    {
        return gptSpiderDiyMapper.insertGptSpiderDiy(gptSpiderDiy);
    }

    /**
     * Update an existing spider configuration.
     *
     * @param gptSpiderDiy configuration carrying the new values (id required)
     * @return affected row count
     */
    @Override
    public int updateGptSpiderDiy(GptSpiderDiy gptSpiderDiy)
    {
        return gptSpiderDiyMapper.updateGptSpiderDiy(gptSpiderDiy);
    }

    /**
     * Delete spider configurations in bulk.
     *
     * @param ids primary keys to delete
     * @return affected row count
     */
    @Override
    public int deleteGptSpiderDiyByIds(Long[] ids)
    {
        return gptSpiderDiyMapper.deleteGptSpiderDiyByIds(ids);
    }

    /**
     * Delete a single spider configuration.
     *
     * @param id primary key to delete
     * @return affected row count
     */
    @Override
    public int deleteGptSpiderDiyById(Long id)
    {
        return gptSpiderDiyMapper.deleteGptSpiderDiyById(id);
    }

    /**
     * List the enabled crawl source items.
     *
     * @return enabled source items
     */
    @Override
    public List<GptSpiderItem> getSpiderItem() {
        return gptSpiderDiyMapper.getSpiderItemBystatus();
    }

    /**
     * List the enabled generation prompt models.
     *
     * @return enabled prompts
     */
    @Override
    public List<GptPrompts> getPrompts() {
        return gptSpiderDiyMapper.getPromptsBystatus();
    }

    /**
     * List all spider categories.
     *
     * @return all categories
     */
    @Override
    public List<GptSpiderClass> getSpiderClass() {
        return gptSpiderDiyMapper.getSpiderClassAll();
    }

    /**
     * Kick off crawling for the selected configurations: mark each as
     * queued ("3"), then run the crawl synchronously.
     *
     * @param ids configuration primary keys to crawl
     * @return always null (callers ignore the result)
     */
    @Override
    public Object spiderArticles(Long[] ids) {
        List<GptSpiderDiy> gptSpiderDiy = gptSpiderDiyMapper.getSelectedDiy(ids);
        List<String> resUrls = new ArrayList<String>();
        for (GptSpiderDiy diy : gptSpiderDiy) {
            diy.setSpiderStatus("3");
            gptSpiderDiyMapper.updateGptSpiderDiy(diy);
        }
        try {
            toSpider(resUrls, gptSpiderDiy);
        } catch (Exception e) {
            // Fix: keep the stack trace (original logged only getMessage()).
            logger.error("spider run failed", e);
        }
        return null;
    }

    /**
     * Run the crawl for each configuration: step 1 collects article links from
     * the list page, step 2 parses each article page and persists it.
     *
     * @param resUrls      out-parameter accumulating every article link found
     *                     across all configurations
     * @param gptSpiderDiy configurations to process
     * @return true when every configuration yielded at least one link;
     *         false when any configuration produced none
     */
    public boolean toSpider(List<String> resUrls, List<GptSpiderDiy> gptSpiderDiy) {
        boolean allFound = true;
        // Step 1: crawl the list page of each configuration.
        for (GptSpiderDiy diy : gptSpiderDiy) {
            // Links collected for THIS configuration only.
            // Fix: the original reused the shared resUrls here, so the second
            // configuration re-crawled every URL found for the first one.
            List<String> diyUrls = new ArrayList<String>();
            boolean emptyResult = false;
            try {
                logger.info("正在抓取网页列表...{}", diy.getSpiderUrl());
                diy.setSpiderStatus("1");
                gptSpiderDiyMapper.updateGptSpiderDiy(diy);

                ZhuCrawler listCrawler = new ZhuCrawler.Builder()
                        .setUrls(diy.getSpiderUrl())
                        .setThreadCount(Integer.valueOf(RuoYiConfig.getSpiderThreadCount()))
                        .setSelectElement(diy.getListDiv())
                        .setPageParser(new PageParser<Object>() {
                            @Override
                            public void parse(Document html, Element pageVoElement) {
                                // Collect every link inside the configured list region.
                                // NOTE(review): with threadCount > 1 this writes to a plain
                                // ArrayList from parser callbacks — confirm ZhuCrawler invokes
                                // the parser single-threaded, else use a synchronized list.
                                Elements listEls = html.select(diy.getListDiv());
                                for (Element el : listEls) {
                                    diyUrls.addAll(new JsoupUtil().findLinks(el));
                                }
                            }
                        })
                        .build();
                listCrawler.start(true);
                // Preserve the out-parameter contract: resUrls still accumulates all links.
                resUrls.addAll(diyUrls);

                String[] urls = diyUrls.toArray(new String[0]);
                if (urls.length == 0) {
                    // "4" = nothing found for this configuration.
                    diy.setSpiderStatus("4");
                    gptSpiderDiyMapper.updateGptSpiderDiy(diy);
                    emptyResult = true;
                    allFound = false;
                    // Fix: the original returned false here, which (a) skipped all
                    // remaining configurations and (b) let the finally block
                    // immediately overwrite status "4" with "2".
                    continue;
                }
                logger.info("准备解析文章..");

                // Step 2: crawl and persist each article page.
                ZhuCrawler articleCrawler = new ZhuCrawler.Builder()
                        .setUrls(urls)
                        .setThreadCount(Integer.valueOf(RuoYiConfig.getSpiderThreadCount()))
                        .setSelectElement(diy.getSaveDiv())
                        .setDeleteElement(diy.getDeleteDiv())
                        .setPageParser(new PageParser<Object>() {
                            @Override
                            public void parse(Document html, Element pageVoElement) throws IOException, InterruptedException {
                                parseArticlePage(html, diy);
                            }
                        })
                        .build();
                articleCrawler.start(true);
            } catch (Exception e) {
                // Fix: keep the stack trace and identify the failing configuration.
                logger.error("spider failed for diy id={} url={}", diy.getId(), diy.getSpiderUrl(), e);
            } finally {
                logger.info("执行采集完成.....");
                if (!emptyResult) {
                    // "2" = finished. Do not clobber the "no results" status set above.
                    diy.setSpiderStatus("2");
                    gptSpiderDiyMapper.updateGptSpiderDiy(diy);
                }
            }
        }
        return allFound;
    }

    /**
     * Parse one article page: extract title/body/publish time, download the
     * body images (rewriting their src to the local upload path), then persist
     * the content row and the article row. Duplicate rows are expected to be
     * rejected by unique constraints and are logged, not rethrown.
     *
     * @param html parsed article page (baseUri = the page URL)
     * @param diy  the configuration that produced this page
     */
    private void parseArticlePage(Document html, GptSpiderDiy diy) throws IOException, InterruptedException {
        logger.info("解析文章");
        String pageUrl = html.baseUri();

        Article article = HtmlSucker.select(HtmlSucker.TEXT_DENSITY_EXTRACTOR)
                .parse(html, pageUrl, diy.getTitleDiv(), diy.getSaveDiv(), diy.getPushTimeDiv());
        if (!ObjectUtils.isNotEmpty(article)) {
            return;
        }

        String artContent = article.getContent();
        Document contentDoc = Jsoup.parse(artContent, pageUrl);
        List<Element> newsImgList = contentDoc.getElementsByTag("img");
        String urlMd5 = Md5Utils.hash(pageUrl);
        logger.info("图片数量{}", newsImgList.size());

        int imgNo = 0;
        for (Element imgEl : newsImgList) {
            // Fix: the original kept Article/GptSysFile as fields of the anonymous
            // parser, shared across concurrently parsed pages; all state is local now.
            saveArticleImage(imgEl, article, urlMd5, pageUrl, imgNo);
            imgNo++;
        }
        artContent = contentDoc.toString();

        // Neutralize remaining external links. The two "https" passes of the
        // original were redundant: any match of "https" also matches "http".
        artContent = artContent.replaceAll("\"(.*?)http(.*?)\"", "'#'");
        artContent = artContent.replaceAll("\'(.*?)http(.*?)\'", "'#'");
        artContent = delHtmlTags(artContent);

        GptSpiderArticleContent gptSpiderArticleContent = new GptSpiderArticleContent();
        gptSpiderArticleContent.setUrlMd5(urlMd5);
        gptSpiderArticleContent.setArtContent(artContent);
        // Default 1 is kept from the original when the insert fails.
        // NOTE(review): on a duplicate, the article row below is linked to content
        // id 1 instead of the existing row's id — confirm intended behavior.
        long contentId = 1L;
        try {
            contentId = gptSpiderArticleContentService.insertGptSpiderArticleContent(gptSpiderArticleContent);
        } catch (Exception e) {
            logger.warn("文章内容已存在无需保存：{}", e.getMessage());
        }

        GptSpiderArticle gptSpiderArticle = new GptSpiderArticle();
        gptSpiderArticle.setTitle(article.getTitle());
        gptSpiderArticle.setPromptsId(diy.getPromptsId());
        gptSpiderArticle.setSpiderDiyId(diy.getId());
        gptSpiderArticle.setSpiderDiyName(diy.getName());
        gptSpiderArticle.setContentId(contentId);
        gptSpiderArticle.setUrlMd5(urlMd5);
        gptSpiderArticle.setClassId(diy.getTypeId());
        gptSpiderArticle.setClassName(diy.getTypeName());
        gptSpiderArticle.setSourceId(diy.getId());
        gptSpiderArticle.setSourceName(diy.getItemName());
        gptSpiderArticle.setStatus("0");
        gptSpiderArticle.setPushTime(DateUtils.parseDate(article.getPushTime()));
        gptSpiderArticle.setSpiderUrl(pageUrl);
        try {
            gptSpiderArticleService.insertGptSpiderArticle(gptSpiderArticle);
        } catch (Exception e) {
            logger.warn("文章已存在无需保存：{}", e.getMessage());
        }
    }

    /**
     * Download one body image, register it in the system-file table and rewrite
     * the img element's src to the local access path. The stored file name is
     * the MD5 of the image URL, so re-downloads overwrite the same file and
     * duplicate rows are rejected by the table's unique key.
     *
     * @param imgEl   the img element (its src attribute is rewritten in place)
     * @param article parsed article (title used as a fallback display name)
     * @param urlMd5  MD5 of the article page URL (links file row to article)
     * @param pageUrl the article page URL
     * @param imgNo   zero-based position of the image within the article body
     */
    private void saveArticleImage(Element imgEl, Article article, String urlMd5, String pageUrl, int imgNo)
            throws IOException, InterruptedException {
        String imgName = imgEl.attr("alt");
        String absoImgUrl = resolveImageUrl(imgEl, pageUrl);

        String imgurlMd5 = Md5Utils.hash(absoImgUrl);
        String paperimgName = imgurlMd5 + ".jpg";
        // Upload directory, partitioned by date.
        String savePaperImgPath = RuoYiConfig.getUploadPath() + "/" + FileUtils.getDateFilePath();
        DownloadURLFile.downFile(absoImgUrl, paperimgName, savePaperImgPath);

        String saveName = StringUtils.isEmpty(imgName) ? article.getTitle() + "-第" + imgNo + "张图" : imgName;
        File newFile = new File(savePaperImgPath + paperimgName);
        GptSysFile gptSysFile = new GptSysFile();
        gptSysFile.setDir(FileUtils.getDateFilePath());
        gptSysFile.setName(saveName);
        gptSysFile.setSizes(FileUtils.GetFileSize(newFile));
        gptSysFile.setUserId("admin");
        gptSysFile.setUserName("管理员");
        gptSysFile.setArtTitle(article.getTitle());
        gptSysFile.setXitustatus("0");
        gptSysFile.setUrlMd5(urlMd5);
        gptSysFile.setImgmd5(imgurlMd5);
        gptSysFile.setOrderNo(imgNo);
        try {
            gptSysFileService.insertGptSysFile(gptSysFile);
        } catch (Exception e) {
            logger.warn("图片已存在无需保存：{}", e.getMessage());
        }
        // Rewrite to the local access path. FileUtils.getDateFilePath() is
        // assumed to end with "/" (consistent with the save-path concatenation).
        String newsDetailImg = "/dev-api/profile/upload/" + FileUtils.getDateFilePath() + paperimgName;
        imgEl.attr("src", newsDetailImg);
    }

    /**
     * Best-effort resolution of an img src to an absolute, downloadable URL,
     * preserving the original fallback chain. Simplification vs. the original:
     * startsWith("https") implies startsWith("http"), so the double checks
     * collapse to a single startsWith("http").
     *
     * NOTE(review): the "http://" + pageUrl + path branch is kept from the
     * original, but pageUrl is already a full URL (html.baseUri()), so the
     * concatenation looks wrong — confirm and fix with scheme+host extraction.
     */
    private String resolveImageUrl(Element imgEl, String pageUrl) {
        String absoImgUrl = imgEl.attr("src");
        if (StringUtils.isEmpty(absoImgUrl) || !absoImgUrl.startsWith("http")) {
            // Let jsoup resolve relative paths against the page's base URI.
            absoImgUrl = imgEl.attr("abs:src");
            logger.info("absoImgUrl:{}", absoImgUrl);
            if (StringUtils.isEmpty(absoImgUrl)) {
                absoImgUrl = imgEl.attr("src");
            }
        }
        if (absoImgUrl.startsWith("/") && !absoImgUrl.startsWith("//")) {
            absoImgUrl = "http://" + pageUrl + absoImgUrl;
        }
        if (!absoImgUrl.startsWith("/") && !absoImgUrl.startsWith("http")) {
            absoImgUrl = imgEl.attr("abs:src");
        }
        if (absoImgUrl.startsWith("//") || StringUtils.isEmpty(absoImgUrl) || !absoImgUrl.startsWith("http")) {
            // Protocol-relative (or still unusable) address: force http.
            absoImgUrl = "http:" + imgEl.attr("src");
        }
        return absoImgUrl;
    }

    /**
     * Clean an HTML string: strip script tags (guards against script
     * injection) and expand "//" to "http://".
     *
     * @param htmlStr HTML to clean; null is treated as empty
     * @return the cleaned, trimmed string
     */
    public static String delHtmlTags(String htmlStr) {
        if (htmlStr == null) {
            return "";
        }
        // Remove entire <script>...</script> elements.
        String scriptRegex = "<script[^>]*?>[\\s\\S]*?<\\/script>";
        htmlStr = htmlStr.replaceAll(scriptRegex, "");
        // NOTE(review): this rewrites EVERY "//" — including the one inside an
        // existing "http://", producing "http:http://". Kept from the original;
        // confirm whether it should target protocol-relative URLs only.
        htmlStr = htmlStr.replaceAll("//", "http://");
        return htmlStr.trim();
    }
}
