package ltd.hxya.novel.crawl.service.impl;

import com.alibaba.fastjson.JSON;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.toolkit.IdWorker;
import com.rabbitmq.client.Channel;
import lombok.extern.slf4j.Slf4j;
import ltd.hxya.novel.common.bean.Result;
import ltd.hxya.novel.common.config.ThreadPoolConfig;
import ltd.hxya.novel.common.constant.rabbit.RabbitConstant;
import ltd.hxya.novel.common.constant.redis.RedisConstant;
import ltd.hxya.novel.common.framework.CustomFuture;
import ltd.hxya.novel.common.to.book.BookChapterTo;
import ltd.hxya.novel.common.to.book.BookTo;
import ltd.hxya.novel.common.utils.*;
import ltd.hxya.novel.common.vo.BookIndexVo;
import ltd.hxya.novel.common.vo.SearchParam;
import ltd.hxya.novel.crawl.bean.ChapterInfo;
import ltd.hxya.novel.crawl.bean.CrawlTask;
import ltd.hxya.novel.crawl.bean.CrawlVo;
import ltd.hxya.novel.crawl.entity.CrawlRule;
import ltd.hxya.novel.crawl.entity.UnexecutedTask;
import ltd.hxya.novel.crawl.feign.BookFeignService;
import ltd.hxya.novel.crawl.handler.CrawlHandler;
import ltd.hxya.novel.crawl.job.JobHandler;
import ltd.hxya.novel.crawl.service.CrawlTaskService;
import ltd.hxya.novel.crawl.service.ICrawlSourceService;
import ltd.hxya.novel.crawl.service.ILastCrawlIndexService;
import ltd.hxya.novel.crawl.service.IUnexecutedTaskService;
import ltd.hxya.novel.entity.crawl.LastCrawlIndex;
import ltd.hxya.novel.entity.rowdata.NovelRowData;
import org.jetbrains.annotations.NotNull;
import org.springframework.amqp.core.Message;
import org.springframework.amqp.rabbit.annotation.Queue;
import org.springframework.amqp.rabbit.annotation.*;
import org.springframework.amqp.rabbit.core.RabbitTemplate;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import reactor.core.publisher.Flux;

import java.io.IOException;
import java.net.URISyntaxException;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.stream.Collectors;

/**
 * Crawl-task service: searches source sites for novels, crawls book metadata,
 * chapter indexes and chapter bodies, and ships the results through RabbitMQ
 * into HDFS/Hive. Also consumes the crawl-task queue (see novelContent).
 */
@Slf4j
@Service
public class CrawlTaskServiceImpl implements CrawlTaskService {

    // CRUD/lookup for crawl sources and their rule sets.
    @Autowired
    private ICrawlSourceService crawlSourceService;

    // Remote book service (feign): existing books, indexes, last-crawled chapters.
    @Autowired
    private BookFeignService bookFeignService;

    // Shared application thread pool (injected bean).
    @Autowired
    private ThreadPoolExecutor executor;

    // Publishes crawl tasks to the crawl-task exchange.
    @Autowired
    private RabbitTemplate rabbitTemplate;

    // HTTP fetcher: retrieves raw page bodies for crawling.
    @Autowired
    private CrawlHandler crawlHandler;

    @Autowired
    private ILastCrawlIndexService lastCrawlIndexService;

    // Persists "to be crawled later" tasks discovered by keyword search.
    @Autowired
    private IUnexecutedTaskService unexecutedTaskService;

    // Factory for per-use thread pools (see crawlChapterContent / renewNovel).
    @Autowired
    private ThreadPoolConfig threadPoolConfig;

    // Per-thread-tree list of crawl sources — presumably set up by a job/handler
    // before crawling; TODO confirm the writer.
    public static InheritableThreadLocal<List<CrawlVo>> threadLocal = new InheritableThreadLocal<>();

    // Per-thread-tree default crawl source used by crawlNovelByCrawlId(String).
    public static InheritableThreadLocal<CrawlVo> defaultCrawlThread = new InheritableThreadLocal<>();

    // Per-thread-tree chapter-title filter words consumed by filterChapterName().
    public static InheritableThreadLocal<List<String>> filterWordThreadLocal = new InheritableThreadLocal<>();

    @Autowired
    private RedisUtils redisUtils;
    /**
     * Crawls the chapter index (table of contents) for a book, following
     * "index_{page}.html" style pagination when the source's bookHref declares it.
     *
     * @param crawlRule crawl rules for the source site
     * @param bookTo    book to crawl; crawlBookId identifies it on the source site
     * @return chapters found after the book's last known chapter, or {@code null}
     *         when no index URL could be built
     * @throws IOException on network failure while fetching an index page
     */
    public List<BookChapterTo> indexInfo(CrawlRule crawlRule, BookTo bookTo) throws IOException {
        String bookHref = crawlRule.getBookHref();
        Integer pageIndex = null;
        // contains(...) instead of indexOf(...) != -1 — same check, clearer intent.
        if (bookHref.contains("index_{page}.html")) {
            // Paginated index: start at page 1.
            pageIndex = 1;
            bookHref = bookHref.replace("{page}", pageIndex.toString());
        }
        String bookUrl = CrawlUtils.getBookUrl(crawlRule.getCrawlUrl(), bookHref, bookTo.getCrawlBookId());

        if (StringUtils.isEmpty(bookUrl)) {
            log.debug("小说章节url为空,继续执行爬取下一本小说......");
            return null;
        }
        // TODO: check whether these are pre-update chapters before crawling the index.
        List<BookChapterTo> chapterInfos = new ArrayList<>();
        while (true) {
            // crawlIndex appends to chapterInfos and reports whether anything matched.
            boolean found = crawlIndex(bookUrl, crawlRule, bookTo, chapterInfos);
            if (!found) {
                break;
            }
            // Non-paginated index: a single page is all there is.
            if (pageIndex == null) {
                break;
            }
            // Advance "index_<n>.html" to the next page.
            bookUrl = bookUrl.replace("index_" + pageIndex + ".html", "index_" + (pageIndex + 1) + ".html");
            pageIndex++;
        }
        return chapterInfos;
    }

    /**
     * Parses one index (table-of-contents) page and appends the chapters that come
     * after the book's last known chapter to {@code chapterTos}.
     *
     * @param bookUrl    URL of the index page to parse
     * @param crawlRule  crawl rules holding the regex patterns for this source
     * @param bookTo     book being crawled; {@code lastIndexName} marks the resume point
     * @param chapterTos output list the newly discovered chapters are appended to
     * @return true if at least one index entry matched on this page (the caller
     *         uses this to decide whether to fetch the next page)
     * @throws IOException on network failure while fetching the page
     */
    public Boolean crawlIndex(String bookUrl, CrawlRule crawlRule, BookTo bookTo, List<BookChapterTo> chapterTos) throws IOException {
        String body = crawlHandler.crawlBody(bookUrl);
        // Optionally narrow the body down to the index section first.
        if (!StringUtils.isEmpty(crawlRule.getIndexPartPattern())) {
            body = CrawlUtils.simpleCrawlRulePattern(body, crawlRule.getIndexPartPattern());
        }
        if (StringUtils.isEmpty(body)) {
            return false;
        }

        Matcher matcher = CrawlUtils.patternCheck(body, crawlRule.getIndexPattern());
        // Flips to true once the stored "last crawled chapter" has been passed.
        Boolean isCurrentChapter = false;

        // Whether any index entry matched on this page at all.
        Boolean isFind = false;
        // Strip a trailing "xxx.html" segment so relative hrefs resolve against the directory.
        if (bookUrl.endsWith(".html")){
            String[] split = bookUrl.split("/");
            bookUrl = bookUrl.replace(split[split.length-1], "");
        }
        while (matcher.find()) {
            isFind = true;
            // group(1) = chapter href, group(2) = chapter title (fixed by IndexPattern).
            String bookContentHref = matcher.group(1);
            if (!bookContentHref.startsWith("/")&&!bookContentHref.startsWith("http")){
                bookContentHref=bookUrl+bookContentHref;
            }
            String bookIndexName = matcher.group(2);
            BookChapterTo chapterInfo = new BookChapterTo();
            bookIndexName = PatternUtils.removeSpecial(bookIndexName);
            // Skip titles that match the configured filter-word list.
            Boolean flag = filterChapterName(bookIndexName);
            if (flag){
                continue;
            }
            if (StringUtils.isEmpty(bookTo.getLastIndexName()) || isCurrentChapter) {
                chapterInfo.setChapterName(bookIndexName);
                chapterInfo.setIndexNum(chapterTos.size());
                chapterInfo.setChapterHref(bookContentHref);
                String indexId = IdWorker.getIdStr();
                chapterInfo.setBookId(bookTo.getCrawlBookId());
                chapterInfo.setIndexId(indexId);
                chapterTos.add(chapterInfo);
                log.debug("链接：" + bookContentHref + "章节：" + bookIndexName);
            } else {
                // Not yet past the resume point: only flip the flag once the stored
                // last chapter title is reached; chapters before it are skipped.
                isCurrentChapter = bookIndexName.equals(bookTo.getLastIndexName());
            }

        }
        return isFind;
    }


    /**
     * Crawls a book's detail page and extracts its basic information.
     *
     * @param bookId    the book's id on the source site
     * @param crawlId   crawl source id, stored on the returned BookTo
     * @param crawlRule rules (URL templates + regex patterns) for the source
     * @return the parsed book, or {@code null} when no URL could be built or the
     *         page did not match the rule set
     * @throws IOException on network failure
     */
    public BookTo novelInfo(String bookId, Integer crawlId, CrawlRule crawlRule) throws IOException {
        String bookHref = crawlRule.getBookHref();
        String[] strings = null;
        if (bookHref.contains("/index_{page}.html")) {
            // Paginated hrefs: strip the pagination suffix to get the detail-page base.
            strings = bookHref.split("/index_\\{page\\}.html");
        }
        String bookUrl;
        if (strings != null) {
            bookUrl = CrawlUtils.getBookUrl(crawlRule.getCrawlUrl(), strings[0], bookId);
        } else {
            bookUrl = CrawlUtils.getBookUrl(crawlRule.getCrawlUrl(), crawlRule.getBookHref(), bookId);
        }

        // BUGFIX: null-safe check — CrawlUtils.getBookUrl may yield null (the sibling
        // indexInfo() guards with StringUtils.isEmpty for the same call); the original
        // bookUrl.isEmpty() would have thrown an NPE in that case.
        if (StringUtils.isEmpty(bookUrl)) {
            log.debug("bookURL为null");
            return null;
        }
        // Fetch the detail page and map it to a BookTo via the rule patterns.
        bookUrl = bookUrl + "/";
        String body = crawlHandler.crawlBody(bookUrl);
        return getBookTo(bookId, crawlId, crawlRule, body);
    }

    /**
     * Crawls a book's basic information without an explicit rule set: the rules are
     * resolved from the chosen crawl source (or the default source when none is given).
     *
     * @param bookId  the book's id on the source site
     * @param crawlId crawl source id; {@code null} selects the default source
     */
    @Override
    public BookTo novelInfo(String bookId, Integer crawlId) throws IOException, ExecutionException, InterruptedException {
        // Resolve the crawl source, falling back to the default when no id is supplied.
        CrawlVo crawlVo = (crawlId == null)
                ? crawlSourceService.defaultCrawlSource()
                : crawlSourceService.getCrawlSourceById(crawlId);
        // Delegate to the rule-aware overload with the source's rule set.
        return novelInfo(bookId, crawlId, crawlVo.getCrawlRule());
    }

    // Crawl a novel by its source-site id — currently unimplemented (empty no-op stub).
    public void crawlNovelByCrawlId(String bookId,String crawlSourceId,BookTo book){

    }
    // Resolves the book's resume point (last stored chapter) and then crawls its chapters.
    // NOTE(review): bookId and crawlVo are not used here — the book carries its own ids.
    public void crawlNovelByCrawlId(String bookId,CrawlVo crawlVo,BookTo book){
        checkIsLastIndex(book);
        crawlChapter(book);
    }
    // Crawls a book by its source-site id using the thread-bound default crawl source.
    public void crawlNovelByCrawlId(String bookId) throws IOException {
        CrawlVo crawlVo = defaultCrawlThread.get();
        BookTo bookTo = novelInfo(bookId, crawlVo.getId(), crawlVo.getCrawlRule());
        // Determine the last stored chapter for this book and resume crawling from it.
        crawlNovelByCrawlId(bookId,crawlVo,bookTo);

    }

    /**
     * Crawls a book by its source-site id using an explicitly chosen crawl source.
     */
    @Override
    public void crawlNovelByCrawlId(String bookId, Integer sourceId) throws IOException {
        CrawlVo crawlVo = crawlSourceService.getCrawlSourceById(sourceId);
        BookTo bookTo = novelInfo(bookId, crawlVo.getId(), crawlVo.getCrawlRule());
        crawlNovelByCrawlId(bookId,crawlVo,bookTo);
    }

    /**
     * Looks up the last chapter this book was crawled to (via the remote book
     * service) and, when present, stores its title on the BookTo as the resume
     * point used by crawlIndex(). (Commented-out local MyBatis query removed —
     * it was superseded by the feign call.)
     */
    private void checkIsLastIndex(BookTo bookTo) {
        Result<LastCrawlIndex> result = bookFeignService.bookLastIndex(bookTo);
        LastCrawlIndex lastCrawlIndex = result.getData();
        if (ObjectUtils.isEmpty(lastCrawlIndex)) {
            // No record yet: crawl from the very first chapter.
            return;
        }
        bookTo.setLastIndexName(lastCrawlIndex.getLastIndexName());
    }


    /**
     * Searches a crawl source for {@code keyword} and returns the matching books,
     * marking each one whose crawlBookId already exists locally with isAdd=true.
     *
     * @param crawlId crawl source id; {@code null} selects the default source
     * @param keyword search keyword sent to the source site
     * @throws IOException          on network failure while fetching the search page
     * @throws ExecutionException   if the async "already added" lookup fails
     * @throws InterruptedException if interrupted while waiting for that lookup
     */
    @Override
    public List<BookTo> crawlNovelList(Integer crawlId, String keyword) throws IOException, ExecutionException, InterruptedException {
        CrawlVo crawlVo;
        if (crawlId == null) {
            crawlVo = crawlSourceService.defaultCrawlSource();
            crawlId = crawlVo.getId();
        } else {
            crawlVo = crawlSourceService.getCrawlSourceById(crawlId);
        }
        BookTo book = new BookTo();
        book.setCrawlSourceId(crawlId);
        // In parallel with the page fetch below, ask the book service which
        // crawlBookIds from this source are already stored locally.
        CompletableFuture<List<String>> crawlIdsCompletableFuture = CustomFuture.supplyExec(executor).thenApply((value) -> {
            Result<List<BookTo>> listResult = bookFeignService.isAdd(book);
            List<BookTo> data = listResult.getData();
            if (CollectionUtils.isEmpty(data)) {
                return new ArrayList<String>();
            }
            return data.stream().map(BookTo::getCrawlBookId).collect(Collectors.toList());
        });

        CrawlRule crawlRule = crawlVo.getCrawlRule();
        String searchUrl = CrawlUtils.getSearchUrl(crawlRule.getCrawlUrl(), crawlRule.getSearchHref(), keyword);

        String body = crawlHandler.crawlBody(searchUrl);
        List<BookTo> bookTos = new ArrayList<>();
        Matcher matcher = CrawlUtils.patternCheck(body, crawlRule.getSearchListGroupPattern());
        // Block for the "already added" ids (never null — the future always returns a list).
        List<String> crawlBookIds = crawlIdsCompletableFuture.get();
        Integer sourceId = crawlId;
        while (matcher.find()) {
            BookTo bookTo = getBookTo(crawlRule, matcher);
            bookTo.setCrawlSourceId(sourceId);
            // contains() on an empty list is already false — no isEmpty guard needed.
            bookTo.setIsAdd(crawlBookIds.contains(bookTo.getCrawlBookId()));
            bookTos.add(bookTo);
        }

        return bookTos;
    }

    /**
     * Crawls the full chapter list for a book and flags every chapter that already
     * exists locally (params["isAdd"] = true) so the caller can show or skip it.
     *
     * @throws IOException on network failure while fetching the index page
     */
    @Override
    public List<ChapterInfo> crawlIndexList(BookTo bookTo) throws IOException {
        Result<List<BookIndexVo>> bookIndexResult = bookFeignService.listBookIndex(bookTo.getId(), null);
        List<BookIndexVo> bookIndexVos = bookIndexResult.getData();
        Map<String, Integer> bookIndexMap;
        if (!CollectionUtils.isEmpty(bookIndexVos)) {
            // BUGFIX: a merge function is supplied — with duplicate chapter titles the
            // original Collectors.toMap threw IllegalStateException; keep the last one.
            bookIndexMap = bookIndexVos.stream()
                    .collect(Collectors.toMap(BookIndexVo::getIndexName, BookIndexVo::getIndexNum, (first, second) -> second));
        } else {
            bookIndexMap = new HashMap<>();
        }
        CrawlVo crawlVo = crawlSourceService.getCrawlSourceById(bookTo.getCrawlSourceId());
        CrawlRule crawlRule = crawlVo.getCrawlRule();
        String bookUrl = CrawlUtils.getBookUrl(crawlRule.getCrawlUrl(), crawlRule.getBookHref(), bookTo.getCrawlBookId());
        String body = crawlHandler.crawlBody(bookUrl);
        Matcher matcher = CrawlUtils.patternCheck(body, crawlRule.getIndexPattern());
        List<ChapterInfo> chapterInfos = new ArrayList<>();
        while (matcher.find()) {
            // group(1) = chapter href, group(2) = chapter title (fixed by IndexPattern).
            String indexHref = matcher.group(1);
            String indexName = matcher.group(2);
            // Drop chapters whose titles hit the filter-word list.
            if (filterChapterName(indexName)) {
                continue;
            }
            ChapterInfo chapterInfo = new ChapterInfo();
            chapterInfo.setChapterHref(indexHref);
            chapterInfo.setChapterName(indexName);

            // "isAdd" marks chapters that already exist in the local index.
            Map<String, Object> isAddMap = new HashMap<>();
            isAddMap.put("isAdd", bookIndexMap.get(indexName) != null);
            chapterInfo.setParams(isAddMap);
            chapterInfos.add(chapterInfo);
        }
        return chapterInfos;
    }
    /**
     * Decides whether a chapter title should be filtered out.
     *
     * @param indexName chapter title (already stripped of special characters)
     * @return TRUE when the title is empty or too similar to a thread-local filter
     *         word; FALSE when the chapter should be kept
     */
    private Boolean filterChapterName(String indexName) {
        // Empty titles are always filtered out.
        if (StringUtils.isEmpty(indexName)) {
            return true;
        }
        List<String> filterWords = filterWordThreadLocal.get();
        // No filter words configured for this thread: keep everything.
        if (CollectionUtils.isEmpty(filterWords)) {
            return false;
        }
        for (String filterWord : filterWords) {
            // NOTE(review): the >= 50 threshold assumes getSimilarity returns a
            // percentage; if it returns a 0..1 cosine value this can never trigger
            // and no chapter is ever filtered — verify against CosineSimilarity.
            if (CosineSimilarity.getSimilarity(indexName, filterWord) >= 50) {
                return true;
            }
        }
        return false;
    }

    /**
     * Enriches a crawl task with its source's rule set and publishes it to the
     * crawl-task exchange for asynchronous processing (consumed by novelContent).
     */
    @Override
    public void addBookIndex(CrawlTask crawlTask) {
        CrawlVo crawlVo = crawlSourceService.getCrawlSourceById(crawlTask.getCrawlSourceId());
        crawlTask.setCrawlRule(crawlVo.getCrawlRule());
        rabbitTemplate.convertAndSend(RabbitConstant.CRAWL_TASK_EXCHANGE, RabbitConstant.CRAWL_TASK_ROUTING_KEY, crawlTask);
    }


    /**
     * Searches the default crawl source with the given query word and records every
     * hit as an UnexecutedTask to be crawled later.
     */
    @Override
    public void searchCrawlBook(SearchParam searchParam) {
        String queryWord = searchParam.getQueryWord();
        List<BookTo> bookTos = null;
        try {
            bookTos = crawlNovelList(null, queryWord);
        } catch (IOException | ExecutionException e) {
            // BUGFIX: log with context instead of printStackTrace().
            log.error("crawlNovelList failed for keyword {}", queryWord, e);
        } catch (InterruptedException e) {
            // Preserve the interrupt status for callers up the stack.
            Thread.currentThread().interrupt();
            log.error("crawlNovelList interrupted for keyword {}", queryWord, e);
        }
        // BUGFIX: the original dereferenced bookTos unconditionally and threw an NPE
        // whenever the search itself failed (bookTos stayed null).
        if (CollectionUtils.isEmpty(bookTos)) {
            return;
        }
        List<UnexecutedTask> unexecutedTasks = new ArrayList<>();
        for (BookTo bookTo : bookTos) {
            UnexecutedTask unexecutedTask = new UnexecutedTask();
            unexecutedTask.setCrawlBookId(bookTo.getCrawlBookId());
            unexecutedTask.setCrawlSourceId(bookTo.getCrawlSourceId());
            unexecutedTask.setBookName(bookTo.getBookName());
            LocalDateTime now = LocalDateTime.now();
            unexecutedTask.setCreateTime(now);
            unexecutedTask.setUpdateTime(now);
            unexecutedTasks.add(unexecutedTask);
        }
        unexecutedTaskService.saveBatch(unexecutedTasks);
    }

    /**
     * Builds a BookTo from one search-result match using the source's search
     * patterns. (An unused local extracting getSearchHrefPattern() was removed.)
     */
    @NotNull
    private BookTo getBookTo(CrawlRule crawlRule, Matcher matcher) {
        BookTo bookTo = new BookTo();
        // The whole matched region of the current search-result entry.
        String group = matcher.group();
        bookTo.setCrawlBookId(CrawlUtils.simpleCrawlRulePattern(group, crawlRule.getSearchCrawlBookIdPattern()));
        bookTo.setAuthorName(CrawlUtils.simpleCrawlRulePattern(group, crawlRule.getSearchAuthorNamePattern()));
        bookTo.setPicUrl(CrawlUtils.simpleCrawlRulePattern(group, crawlRule.getSearchImgPattern()));
        bookTo.setBookDesc(CrawlUtils.simpleCrawlRulePattern(group, crawlRule.getSearchDescPattern()));
        bookTo.setBookName(CrawlUtils.simpleCrawlRulePattern(group, crawlRule.getSearchBookNamePattern()));
        return bookTo;
    }

    /**
     * Builds a BookTo from the raw HTML body of a book detail page.
     *
     * @return the populated book, or {@code null} when no book name could be
     *         extracted (i.e. the page did not match this rule set)
     */
    private BookTo getBookTo(String bookId, Integer crawlId, CrawlRule crawlRule, String body) throws IOException {
        // Extract every field up front with the source's regex patterns.
        String img = CrawlUtils.simpleCrawlRulePattern(body, crawlRule.getBookImgPattern());
        String category = CrawlUtils.simpleCrawlRulePattern(body, crawlRule.getBookCategoryPattern());
        String desc = CrawlUtils.simpleCrawlRulePattern(body, crawlRule.getBookDescPattern());
        String author = CrawlUtils.simpleCrawlRulePattern(body, crawlRule.getBookAuthorPattern());
        String name = CrawlUtils.simpleCrawlRulePattern(body, crawlRule.getBookNamePattern());
        // TODO: the status extraction is tightly coupled to this flow — improve.
        String status = CrawlUtils.simpleCrawlRulePattern(body, crawlRule.getBookStatusPattern());
        if (StringUtils.isEmpty(name)) {
            // No name means the page did not match; signal "no book" to the caller.
            return null;
        }
        // TODO: consider filtering by name/description before accepting the book.
        BookTo result = new BookTo();
        // "连载" marks an ongoing serialization; anything else is treated as finished.
        result.setStatus("连载".equals(status));
        result.setBookDesc(desc);
        result.setBookName(name);
        result.setCrawlBookId(bookId);
        result.setAuthorName(author);
        // Resolve a possibly relative cover path against the source site's base URL.
        result.setPicUrl(BaseUtils.getUrl(img, crawlRule.getCrawlUrl()));
        result.setCatName(category);
        result.setCrawlSourceId(crawlId);
        return result;
    }

    /**
     * RabbitMQ consumer for crawl tasks: crawls every chapter body in the task,
     * re-crawls the book's basic info, serializes everything to newline-delimited
     * JSON, stores it in HDFS and loads it into the Hive table book_row_data.
     */
    @RabbitListener(bindings = {
            @QueueBinding(value =
            @Queue(RabbitConstant.CRAWL_TASK_QUEUE),
                    exchange = @Exchange(RabbitConstant.CRAWL_TASK_EXCHANGE),
                    key = RabbitConstant.CRAWL_TASK_ROUTING_KEY)})
    @RabbitHandler
    public void novelContent(CrawlTask crawlTask, Message message, Channel channel) throws IOException, URISyntaxException, InterruptedException, ExecutionException, SQLException, ClassNotFoundException {
        // TODO: guard against duplicate consumption of the same crawl task.
        long deliveryTag = message.getMessageProperties().getDeliveryTag();
        // Crawl the chapter bodies first, then the book's metadata; both reads of
        // crawlTask.getBookId() must happen BEFORE the id is overwritten below.
        List<BookChapterTo> chapterInfos = crawlChapterContent(crawlTask);
        BookTo bookTo = novelInfo(crawlTask.getBookId(), crawlTask.getCrawlSourceId(), crawlTask.getCrawlRule());
        // The task's bookId is replaced with a fresh snowflake id, used only as the
        // HDFS file name from here on. (An unused empty chapterTos list was removed.)
        crawlTask.setBookId(IdWorker.getIdStr());
        String json = strucJson(chapterInfos, bookTo);
        HDFSUtils.createFile(crawlTask.getBookId(), json);
        // Load the freshly written file into Hive.
        HiveUtils.otherOperator("load data inpath '" + HDFSUtils.getDefaultFullPath(crawlTask.getBookId()) + "' into table book_row_data");
        // NOTE(review): multiple=true acks EVERY delivery up to this tag, not just
        // this one — confirm that is intended; basicAck(deliveryTag, false) is the
        // usual single-message ack.
        channel.basicAck(deliveryTag, true);
    }

    /**
     * Serializes the crawled chapters into newline-delimited JSON rows (one
     * NovelRowData per chapter) for Hive ingestion.
     * Uses a StringJoiner instead of the original O(n²) string concatenation loop.
     *
     * @param chapterInfos chapters with content already filled in
     * @param bookTo       book-level fields copied onto every row
     * @return newline-separated JSON objects; empty string for an empty chapter list
     */
    @NotNull
    private String strucJson(List<BookChapterTo> chapterInfos, BookTo bookTo) {
        StringJoiner json = new StringJoiner("\n");
        // One logical book id shared by every row of this batch.
        String bookId = IdWorker.getIdStr();
        for (BookChapterTo chapterInfo : chapterInfos) {
            NovelRowData novelRowData = new NovelRowData();
            novelRowData.setContent(chapterInfo.getContent());
            novelRowData.setIndexNum(chapterInfo.getIndexNum());
            // Strip all whitespace from the chapter title.
            novelRowData.setIndexName(chapterInfo.getChapterName().replaceAll("\\s", ""));
            // Copy shared book-level fields; kept BEFORE the explicit setters below so
            // same-named BookTo properties cannot overwrite them (original ordering).
            BeanUtils.copyProperties(bookTo, novelRowData);
            novelRowData.setId(IdWorker.getIdStr());
            novelRowData.setCrawlBookId(chapterInfo.getBookId());
            novelRowData.setBookId(bookId);
            novelRowData.setBookStatus(true);
            json.add(JSON.toJSONString(novelRowData));
        }
        return json.toString();
    }

    /**
     * Crawls the body text of the task's chapters (currently capped to the first
     * five index numbers) sequentially, returning the chapters with content set.
     *
     * @throws InterruptedException while awaiting executor termination
     */
    public List<BookChapterTo> crawlChapterContent(CrawlTask crawlTask) throws InterruptedException {
        CrawlRule crawlRule = crawlTask.getCrawlRule();
        // TODO confirm: only chapters with indexNum < 5 are crawled — presumably a
        // development-time cap on the amount of content fetched.
        List<BookChapterTo> chapterInfos = crawlTask.getChapterInfos().stream()
                .filter(chapterInfo -> chapterInfo.getIndexNum() < 5)
                .collect(Collectors.toList());

        List<BookChapterTo> chapterTos = new ArrayList<>();
        // NOTE(review): nothing is ever submitted to this executor — the crawl loop
        // below runs synchronously — and if threadPoolConfig.threadPoolExecutor()
        // returns a shared bean, shutting it down here would break other users.
        ThreadPoolExecutor threadPoolExecutor = threadPoolConfig.threadPoolExecutor();
        for (BookChapterTo chapterInfo : chapterInfos) {
            crawlContetBody(crawlRule, chapterTos, chapterInfo);
        }
        threadPoolExecutor.shutdown();
        threadPoolExecutor.awaitTermination(Integer.MAX_VALUE, TimeUnit.NANOSECONDS);
        return chapterTos;
    }

    /**
     * Fetches the body text of one chapter (handling paginated chapter pages),
     * stores it on the chapter and appends the chapter to {@code chapterTos}.
     */
    private void crawlContetBody(CrawlRule crawlRule, List<BookChapterTo> chapterTos, BookChapterTo chapterInfo) {
        // Politeness delay of 0–4 s between fetches.
        try {
            Thread.sleep(new Random().nextInt(5) * 1000L);
        } catch (InterruptedException e) {
            // BUGFIX: restore the interrupt status instead of swallowing it.
            Thread.currentThread().interrupt();
        }
        String crawlUrl = crawlRule.getCrawlUrl();

        // A "{N}" placeholder in the content href marks a chapter body split over N pages.
        Matcher matcher = PatternUtils.pattern("\\{([0-9])\\}", crawlRule.getBookContentHref());
        String totalPage = PatternUtils.simpleStrinMatching(matcher);
        if (StringUtils.isEmpty(totalPage)) {
            totalPage = "1";
        }
        // Hoisted out of the loop: parse the page count once.
        int pageCount = Integer.parseInt(totalPage);
        boolean paged = crawlRule.getBookContentHref().contains("{" + totalPage + "}");
        String chapterUrl = BaseUtils.getUrl(chapterInfo.getChapterHref(), crawlRule.getCrawlUrl());
        // Accumulate the (possibly multi-page) chapter body.
        StringBuilder content = new StringBuilder();
        for (int i = 1; i <= pageCount; i++) {
            if (paged) {
                // Page 1: "...html" -> "..._1.html"; page i>1: "..._<i-1>.html" -> "..._<i>.html".
                String target = i == 1 ? ".html" : "_" + (i - 1) + ".html";
                chapterUrl = chapterUrl.replace(target, "_" + i + ".html");
            }

            String bookContentUrl = BaseUtils.getUrl(chapterUrl, crawlUrl);
            String body = "";
            try {
                body = crawlHandler.crawlBody(bookContentUrl);
            } catch (IOException e) {
                // BUGFIX: log instead of printStackTrace(); an empty body falls through.
                log.error("Failed to fetch chapter page {}", bookContentUrl, e);
            }
            String matcherResult = CrawlUtils.simpleCrawlRulePattern(body, crawlRule.getBookContentPattern());
            content.append(PatternUtils.replaceLabel(crawlRule.getReplaceLabel(), matcherResult));
        }

        chapterInfo.setContent(content.toString());
        chapterTos.add(chapterInfo);
        log.debug("爬取到章节：" + chapterInfo.getChapterName());
    }

    /**
     * Refreshes the chapters of every known book, submitting one crawl per book to
     * a pool from threadPoolConfig.
     */
    public void renewNovel() {
        Result<List<BookTo>> result = bookFeignService.listAllBook();
        List<BookTo> bookTos = result.getData();
        // Guard: nothing to do when the book service returns no data.
        if (CollectionUtils.isEmpty(bookTos)) {
            return;
        }
        ThreadPoolExecutor threadPoolExecutor = threadPoolConfig.threadPoolExecutor();
        for (BookTo bookTo : bookTos) {
            // BUGFIX: the original also called crawlChapter(bookTo) synchronously
            // right after submitting it here, crawling every book TWICE; only the
            // asynchronous submission remains.
            CustomFuture.runExec(() -> crawlChapter(bookTo), threadPoolExecutor);
        }
    }

    /**
     * Crawls a single book's new chapters (resuming after the last stored chapter
     * when bookTo.lastIndexName is set) and publishes them as a CrawlTask for the
     * content consumer (novelContent).
     */
    public void crawlChapter(BookTo bookTo) {
        CrawlVo crawlVo = crawlSourceService.getCrawlSourceById(bookTo.getCrawlSourceId());
        CrawlRule crawlRule = crawlVo.getCrawlRule();
        List<BookChapterTo> chapterInfoList = null;
        try {
            chapterInfoList = indexInfo(crawlRule, bookTo);
        } catch (IOException e) {
            // BUGFIX: include the exception itself instead of dropping it.
            log.error("在执行crawlTaskService.indexInfo(crawlRule, bookTo);方法是出现错误", e);
        }
        if (CollectionUtils.isEmpty(chapterInfoList)) {
            // Nothing new to crawl (or the index fetch failed).
            return;
        }
        CrawlTask crawlTask = new CrawlTask();
        crawlTask.setChapterInfos(chapterInfoList);
        crawlTask.setBookId(bookTo.getCrawlBookId());
        crawlTask.setCrawlSourceId(bookTo.getCrawlSourceId());
        crawlTask.setCrawlRule(crawlRule);
        // TODO: the novel's total word count still needs to be updated.
        rabbitTemplate.convertAndSend(RabbitConstant.CRAWL_TASK_EXCHANGE, RabbitConstant.CRAWL_TASK_ROUTING_KEY, crawlTask);
    }

    /**
     * Probes candidate book ids 0..999 on a crawl source and collects the ids that
     * resolve to real books into {@code idMap} under the source's name. Ids already
     * tracked locally are included without re-fetching them.
     */
    @Override
    public void allAvailableUrl(CrawlVo crawlVo, Map<String, List<String>> idMap) throws IOException {
        List<String> availableUrl = new ArrayList<>();
        Integer crawlId = crawlVo.getId();
        // Books already tracked for this source count as available by definition.
        List<LastCrawlIndex> lastCrawlIndices = bookFeignService.lastIndexList(crawlId).getData();
        if (CollectionUtils.isEmpty(lastCrawlIndices)) {
            lastCrawlIndices = new ArrayList<>();
        }
        // crawlBookId -> bookName lookup. A plain loop replaces the original
        // side-effecting stream.map(); put() also tolerates duplicate ids, which
        // would have made Collectors.toMap throw.
        Map<String, String> bookIdMap = new HashMap<>();
        for (LastCrawlIndex lastCrawlIndex : lastCrawlIndices) {
            availableUrl.add(lastCrawlIndex.getCrawlBookId());
            bookIdMap.put(lastCrawlIndex.getCrawlBookId(), lastCrawlIndex.getBookName());
        }
        // Probe each candidate id on the source site (primitive loop index — the
        // original boxed Integer counter was pointless).
        for (int i = 0; i < 1000; i++) {
            crawlAvailableSource(crawlVo, availableUrl, bookIdMap, i);
        }
        idMap.put(crawlVo.getSourceName(), availableUrl);
    }
    /**
     * Probes one candidate book id on the source site; when the page resolves to a
     * real book not already tracked, records its id and queues it for crawling.
     *
     * NOTE(review): @Async has no effect when this method is invoked from within
     * the same class (allAvailableUrl) because the call bypasses the Spring proxy.
     */
    @Async
    public void crawlAvailableSource(CrawlVo crawlVo, List<String> availableUrl, Map<String, String> bookIdMap, Integer i) throws IOException {
        // Skip ids we already track — checked BEFORE fetching (the original fetched
        // the page first and only then discarded known books, wasting a request).
        String bookName = bookIdMap.get(i.toString());
        if (!StringUtils.isEmpty(bookName)) {
            return;
        }
        BookTo bookTo = novelInfo(i.toString(), crawlVo.getId(), crawlVo.getCrawlRule());
        if (!ObjectUtils.isEmpty(bookTo)) {
            // Parameterized logging; rendered message unchanged.
            log.info("{}中的第{}页中包含有用资源", crawlVo.getSourceName(), i);
            availableUrl.add(bookTo.getCrawlBookId());
            // Queue this book for full crawling.
            crawlNovelByCrawlId(i.toString(), crawlVo, bookTo);
        }
    }

    /**
     * Intended to manage chapter-title filter words — currently unimplemented (no-op).
     */
    @Override
    public void chapterFilterWord(String keyword) {

    }


}
