package com.zbl.controller;

import com.xuxueli.crawler.XxlCrawler;
import com.xuxueli.crawler.conf.XxlCrawlerConf;
import com.xuxueli.crawler.parser.PageParser;
import com.xuxueli.crawler.util.FileUtil;
import com.zbl.dto.JDItems;
import com.zbl.vo.JDItemVo;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.springframework.data.mongodb.core.MongoTemplate;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import javax.annotation.Resource;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * @Author: zhangjun
 * @Date: 2020/7/28 17:35
 * @Description:
 */
@RestController
@RequestMapping("/single/page")
public class SinglePageCrawlerController {

    /** Target listing page to crawl (JD - Konka product list). */
    private static final String CRAWL_URL =
        "https://mall.jd.com/view_search-407907-13637358-99-1-20-1.html";

    /** Local directory where downloaded images are stored. */
    private static final String IMAGE_DIR = "D:\\images";

    @Resource
    private MongoTemplate mongoTemplate;

    /**
     * Crawls a single JD listing page, downloads the product images found on it,
     * and persists one {@link JDItems} record (title + image URL) per image to MongoDB.
     *
     * <p>Runs synchronously: {@code crawler.start(true)} blocks until the crawl
     * completes, then the endpoint returns {@code "ok"}.
     *
     * @return the literal string {@code "ok"} once the crawl has finished
     */
    @RequestMapping("/craw")
    public String craw() {
        // PhantomJS engine path, only needed for the JS-rendering page loaders below.
        // String driverPath = "D:/software/phantomjs-2.1.1-windows/bin/phantomjs.exe";

        XxlCrawler crawler = new XxlCrawler.Builder()
            .setUrls(CRAWL_URL)
            .setAllowSpread(false) // single page only; do not follow links
            // .setPageLoader(new HtmlUnitPageLoader())                          // HtmlUnit: supports JS rendering
            // .setPageLoader(new SeleniumPhantomjsPageLoader(driverPath))       // Selenium + PhantomJS: supports JS rendering
            .setPageParser(new PageParser<JDItemVo>() {
                @Override
                public void parse(Document html, Element pageVoElement, JDItemVo pageVo) {
                    // Log the parsed PageVo for this page.
                    String pageUrl = html.baseUri();
                    System.out.println(pageUrl + "：" + pageVo.toString());

                    if (pageVo.getImages() == null || pageVo.getImages().isEmpty()) {
                        return; // nothing to download or persist
                    }

                    // Snapshot the titles once; may be null or shorter than the image
                    // list, so every access below is bounds-checked.
                    List<String> titles = pageVo.getTitles();

                    // De-duplicate image URLs while keeping a running index.
                    // BUG FIX: the original indexed titles.get(index) with no bounds
                    // check while iterating a HashSet — a deduplicated/reordered set
                    // could make the index run past the titles list and throw
                    // IndexOutOfBoundsException mid-crawl.
                    Set<String> imagesSet = new HashSet<>(pageVo.getImages());
                    int index = 0;
                    for (String img : imagesSet) {

                        // Download the image file into IMAGE_DIR.
                        String fileName = FileUtil.getFileNameByUrl(img, null);
                        boolean ret = FileUtil.downFile(img, XxlCrawlerConf.TIMEOUT_MILLIS_DEFAULT, IMAGE_DIR, fileName);
                        System.out.println("down images " + (ret ? "success" : "fail") + "：" + img);

                        JDItems items = new JDItems();
                        // Guarded lookup: fall back to null when no matching title exists.
                        items.setTitle(titles != null && index < titles.size() ? titles.get(index) : null);
                        items.setImage(img);
                        // Persist the item to MongoDB.
                        System.out.println(">>>>>>>> " + items);
                        mongoTemplate.save(items);
                        index++;
                    }
                }
            })
            .build();
        crawler.start(true); // true = run synchronously in this request thread

        return "ok";
    }
}
