/*
 * @Version: 0.0.1
 * @Author: ider
 * @Date: 2020-05-07 21:25:11
 * @LastEditors: ider
 * @LastEditTime: 2020-05-08 09:23:59
 * @Description: Apify PuppeteerCrawler demo that seeds a request queue and logs each response body
 */

// NOTE(review): cheerio is required but never used in this file — presumably
// kept for future HTML parsing in the page handler; confirm before removing.
const cheerio = require('cheerio')
const Apify = require('apify')
// Shared Apify logger, also used inside Apify.main below.
const { log } = Apify.utils;
log.setLevel(log.LEVELS.DEBUG); // verbose logging while developing
// Prepare a list of URLs to crawl, then start the crawler.
Apify.main(async () => {
    // Named request queue persists across runs; seed it with the start URLs.
    const requestQueue = await Apify.openRequestQueue('mzt');
    await requestQueue.addRequest({ url: 'https://www.mzitu.com' });
    await requestQueue.addRequest({ url: 'http://httpbin.org/headers' });

    // Named storages persist between runs. Currently unused by the handlers
    // below, but opened so data can be saved into them later.
    const store = await Apify.openKeyValueStore('some-name');
    const dataset = await Apify.openDataset('some-name');

    // Crawl the URLs.
    const crawler = new Apify.PuppeteerCrawler({
        requestQueue,
        launchPuppeteerOptions: {
            slowMo: 100,             // slow down Puppeteer operations to simplify debugging
            // proxyUrl: 'http://192.168.0.55:1080',
            handleSIGINT: true,      // so Chrome doesn't exit when we quit Node
            headless: false,         // show the browser window while debugging
            ignoreHTTPSErrors: true,
        },
        maxRequestRetries: 10,
        handlePageTimeoutSecs: 30,
        maxConcurrency: 1,
        useSessionPool: true,

        // Custom navigation so we control waitUntil/timeout per request.
        // Request interception (e.g. blocking images via
        // Apify.utils.puppeteer.blockRequests) can be added here if needed.
        gotoFunction: async ({ request, page }) => {
            return Apify.utils.puppeteer.gotoExtended(page, request, {
                waitUntil: 'networkidle2',
                timeout: 30000,
            });
        },

        // Called once for every successfully loaded page.
        // 'page' is a Puppeteer.Page with page.goto(request.url) already done;
        // 'response' is the Puppeteer HTTPResponse for the main navigation.
        handlePageFunction: async ({ request, response, page }) => {
            // response.text() returns a Promise — it must be awaited,
            // otherwise we'd log "Promise { <pending> }" instead of the body.
            log.info(await response.text());
        },

        // Called only after a request has exhausted all maxRequestRetries attempts.
        handleFailedRequestFunction: async ({ request, error }) => {
            log.debug(`Request ${request.url} failed too many times. ${error}`);
        },
    });

    await crawler.run();
});