package com.wgu.crawl.service.impl;

import com.wgu.crawl.CrawlBook;
import com.wgu.crawl.CrawlSite;
import com.wgu.crawl.CrawlType;
import com.wgu.crawl.service.CrawlBookService;
import com.wgu.crawl.service.CrawlService;
import com.wgu.crawl.service.CrawlSiteService;
import com.wgu.crawl.service.CrawlTypeService;
import com.wgu.crawl.util.CrawlUtil;
import lombok.extern.slf4j.Slf4j;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.data.domain.Pageable;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import java.util.ArrayList;
import java.util.List;

/**
 * Crawl service implementation for the quanben.co novel site: registers the
 * site, discovers its categories, and crawls paginated book listings.
 *
 * @author w
 * @version 1.0
 * @since 2019/6/27
 */
@Slf4j
@Service
public class CrawlServiceImpl implements CrawlService{
    @Resource
    private CrawlSiteService crawlSiteService;
    @Resource
    private CrawlTypeService crawlTypeService;
    @Resource
    private CrawlBookService crawlBookService;

    @Override
    @Async
    public void crawlChapter(CrawlBook crawlBook) {

    }

    @Override
    public List<CrawlType> crawlType(String url) {
        CrawlSite site = new CrawlSite();
        site.setCharset("GBK");
        site.setName("全本小说网");
        site.setUrl(url);
        site=crawlSiteService.save(site);

        // 下面是爬虫 输入
        Elements es = CrawlUtil.analy(url,"GBK",".tagdh_02 a");
        List<CrawlType> list = new ArrayList<>();
        for(Element element:es){
            if(!element.text().equals("首页")&&!element.text().equals("排行榜")){
                CrawlType type = new CrawlType();
                type.setName(element.text());
                type.setSiteId(site.getId());
                type.setTypeUrl(element.absUrl("href"));
                crawlTypeService.save(type);
                list.add(type);
            }
        }
        return list;
    }

    @Override
    @Async
    public void crawlBook(CrawlType crawlType) {
        String typeUrl = crawlType.getTypeUrl();
        // 爬取分页数
        List<Element> list = CrawlUtil.analy(typeUrl,"GBK","#pagestats");
        // 默认选取集合第一个  结果为: 1/17 再以 / 分割为数组
        String[] page = list.get(0).text().split("/");
        // 获取真正分页数 如：17
        int total = Integer.parseInt(page[1]);
        for(int i=1;i<=total;i++){
            //组装当前分页的URL 默认typeUrl:  http://www.quanben.co/sort/1_1.html
            // 获取typeUrl的前面半部分  以 “-” 分割
            String front = typeUrl.split("_")[0];
            //组装成 http://www.quanben.co/sort/1_2.html  http://www.quanben.co/sort/1_3.html ...
            typeUrl = front+"_"+i+".html";
            log.info("typeUrl:{}",typeUrl);
            //爬取当前页面的详细内容 书名之类的
            crawlBook(crawlType.getId(),typeUrl);
        }
    }


    /**
     * 根据typeUrl 爬取书籍信息
     * @param typeId  类型id 保存书籍进入数据库时有用
     * @param typeUrl
     */
    private void crawlBook(Long typeId,String typeUrl){
        Elements es = CrawlUtil.analy(typeUrl,"GBK",".Sum");
        es.forEach(element -> {
            // 获取 tag 为 a的所有信息
            ArrayList<Element> arrayList = element.getElementsByTag("a");
            //获取图片url
            Element e1 = arrayList.get(0).getElementsByTag("img").get(0);
            // 获取书籍名称 及 书籍 url
            Element e2 = arrayList.get(1);
            // 获取书籍作者信息
            Element e3 = arrayList.get(2);
            CrawlBook book = new CrawlBook();
            book.setAuthor(e3.text());
            book.setAuthorUrl(e3.absUrl("href"));
            book.setBookImgUrl(e1.absUrl("src"));
            book.setBookUrl(e2.absUrl("href"));
            book.setName(e2.text());
            book.setTypeId(typeId);
            crawlBookService.save(book);
        });
    }
}
