/*
 * Copyright (C) GSX Techedu Inc. All Rights Reserved
 * Unauthorized copying of this file, via any medium is strictly prohibited
 * Proprietary and confidential
 */

package com.nime.novel.crawl.core.list.impl;

import com.google.common.collect.Lists;
import com.nime.novel.crawl.constant.ChannelEnum;
import com.nime.novel.crawl.constant.CrawConstant;
import com.nime.novel.crawl.constant.StatusEnum;
import com.nime.novel.crawl.core.list.UrlGenService;
import com.nime.novel.crawl.domain.CrawlUrl;
import com.nime.novel.crawl.utils.UrlUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.codec.digest.DigestUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.stereotype.Service;

import java.util.List;

/**
 * @author liujialiang
 * @description
 * @team wuhan operational dev.
 * @date 2020/8/28 11:30 下午
 **/
@Slf4j
@Service
public class BookUrlGenServiceImpl extends UrlGenService {

    /** First page of the book-list channel; crawling starts here when there is no history. */
    private static final String FIRST = "http://m.xdingdiann.com/sort/0/1.html";

    /**
     * Selects the next book-list URL to crawl for the given policy.
     *
     * <p>Selection order:
     * <ol>
     *   <li>the oldest {@code TODO} url — resume pending work first;</li>
     *   <li>if any url is {@code DOING}, return {@code null} (a crawl is already in flight);</li>
     *   <li>the newest {@code FAIL} url — retry the most recent failure;</li>
     *   <li>otherwise build the page following the newest {@code DONE} url
     *       (or {@link #FIRST} when nothing has been crawled yet).</li>
     * </ol>
     *
     * @param policyId crawl policy identifier
     * @return the next url to crawl, or {@code null} while a crawl is in progress
     */
    @Override
    protected CrawlUrl getNewestUrl(Integer policyId) {
        CrawlUrl crawlUrl = crawlUrlService.getOldCrawlUrlByStatus(policyId, ChannelEnum.BOOK_LIST.getCode(), StatusEnum.TODO);
        if (null != crawlUrl) {
            return crawlUrl;
        }
        log.info("暂时没有待爬取的URL: policy={} channel={}", policyId, ChannelEnum.BOOK_LIST.getCode());
        // Is a page currently being crawled? If so, do not schedule another one.
        CrawlUrl crawling = crawlUrlService.getOldCrawlUrlByStatus(policyId, ChannelEnum.BOOK_LIST.getCode(), StatusEnum.DOING);
        if (null != crawling) {
            log.info("爬取中{}...", crawling.getUrl());
            return null;
        }
        // Retry the most recent failure before advancing to a new page.
        CrawlUrl failCrawUrl = crawlUrlService.getNewCrawlUrlByStatus(policyId, ChannelEnum.BOOK_LIST.getCode(), StatusEnum.FAIL);
        if (null != failCrawUrl) {
            return failCrawUrl;
        }
        CrawlUrl latestCrawUrl = crawlUrlService.getNewCrawlUrlByStatus(policyId, ChannelEnum.BOOK_LIST.getCode(), StatusEnum.DONE);
        return urlBuilder(policyId, latestCrawUrl);
    }

    /**
     * Extracts book detail-page links from a book-list page.
     *
     * @param html raw html of a list page
     * @return relative hrefs of each book entry; empty when the page contains none
     */
    @Override
    protected List<String> parseUrl(String html) {
        List<String> urls = Lists.newArrayList();
        Document doc = Jsoup.parse(html);
        // Iterating an empty selection is a no-op, so no explicit size check is needed.
        for (Element item : doc.select("div.hot_sale > div.bookinfo")) {
            // Elements.attr() returns the attribute of the first matching <a>,
            // or "" when the entry has no link at all.
            String href = item.getElementsByTag("a").attr("href");
            if (!href.isEmpty()) {
                // Guard: a blank href would otherwise become a bare-host url in saveNewUrlList.
                urls.add(href);
            }
        }
        return urls;
    }


    /**
     * Persists, for every parsed href, a BOOK url (the book home page) and the
     * corresponding CHAPTER_LIST url derived from it.
     *
     * @param crawlUrl the list-page url the hrefs were parsed from; its host becomes the url prefix
     * @param urlList  relative hrefs extracted by {@link #parseUrl(String)}
     * @return always {@code false} — NOTE(review): presumably means "list crawl not finished";
     *         confirm against {@code UrlGenService}'s contract for this return value
     * @throws Exception propagated from the insert operations
     */
    @Override
    protected boolean saveNewUrlList(CrawlUrl crawlUrl, List<String> urlList)
        throws Exception {
        // Build absolute urls from the list page's host; scheme is fixed to http like FIRST.
        String host = UrlUtil.getDomain(crawlUrl.getUrl());
        String prefix = "http://" + host;
        for (String url : urlList) {
            String bookHomeUrl = prefix + url;
            // assumes the href ends with '/' so "all.html" lands under the book path — TODO confirm
            String chapterListUrl = bookHomeUrl + "all.html";
            Long bookId = insert(crawlUrl.getPolicyId(), crawlUrl.getReferId(), bookHomeUrl, ChannelEnum.BOOK);
            // The chapter-list url refers back to the book record just created.
            insert(crawlUrl.getPolicyId(), bookId, chapterListUrl, ChannelEnum.CHAPTER_LIST);
        }
        return false;
    }

    /**
     * Builds, persists and returns the next list-page url.
     *
     * <p>List urls end with {@code <page>.html}; the next url is produced by
     * incrementing that page number. When {@code crawlUrl} is {@code null} the
     * crawl starts over from {@link #FIRST}.
     *
     * @param policyId crawl policy identifier stamped onto the new record
     * @param crawlUrl newest DONE url, or {@code null} when no page was crawled yet
     * @return the newly created TODO url record
     * @throws IllegalStateException when the stored url does not match the
     *         {@code .../<page>.html} shape and the page index cannot be parsed
     */
    private CrawlUrl urlBuilder(Integer policyId, CrawlUrl crawlUrl) {
        String latestUrl;
        if (null == crawlUrl) {
            latestUrl = FIRST;
        } else {
            String url = crawlUrl.getUrl();
            int slash = url.lastIndexOf('/');
            int suffix = url.lastIndexOf(".html");
            if (slash < 0 || suffix <= slash) {
                // Fail loudly with context instead of a cryptic
                // StringIndexOutOfBoundsException from the substring calls.
                throw new IllegalStateException("Cannot parse page index from list url: " + url);
            }
            int page = Integer.parseInt(url.substring(slash + 1, suffix)) + 1;
            latestUrl = url.substring(0, slash + 1) + page + ".html";
        }
        // urlHash is the de-duplication key for crawl urls.
        String urlHash = DigestUtils.md5Hex(latestUrl);
        log.info("生成新的url:{}", latestUrl);
        CrawlUrl newCrawlUrl = CrawlUrl.builder()
            .url(latestUrl)
            .channel(ChannelEnum.BOOK_LIST.getCode())
            .policyId(policyId)
            .crawlStatus(StatusEnum.TODO.getCode())
            .referId(0L)
            .urlHash(urlHash)
            .build();

        crawlUrlService.create(newCrawlUrl);
        // Route is "<parentId>-<id>"; list pages are roots, hence the "0-" prefix.
        crawlUrlService.updateRoute(newCrawlUrl.getId(), "0-" + newCrawlUrl.getId());
        return newCrawlUrl;
    }

}
