'use strict'
// 爬取数据

const Crawler = require('crawler');
//const article = require('../model/article');
const http = require('http');
const path = require('path');
const fs = require('fs');
const request = require('request');
const url = 'https://www.cnblogs.com'

// Every avatar URL seen so far, across all crawled pages (used for
// unique file naming and the running count logged after each page).
const arr = [];

const crawler = new Crawler({
    maxConnections: 100,
    // Invoked once per queued page; `res.$` is the cheerio handle over
    // the fetched HTML. `done()` must always be called to release the slot.
    callback: function (error, res, done) {
        if (error) {
            console.log(`错误信息：${error}`);
        } else {
            const $ = res.$;
            const list = $('#post_list .post_item').toArray();
            console.log(list.length);

            // 下载作者头像 — collect THIS page's avatar URLs into a local
            // array. The old code pushed into the module-level `arr` and
            // then looped over all of `arr`, so every new page re-downloaded
            // (and overwrote) images from previously crawled pages.
            const pageImgs = [];
            $('#post_list img').each(function () {
                const src = $(this).attr('src'); // e.g. https://pic.cnblogs.com/face/1752549/20200315200934.png
                if (src) {
                    pageImgs.push(src);
                }
            });

            // Make sure the target directory exists before streaming into it;
            // createWriteStream would otherwise throw ENOENT.
            const imgDir = path.join(__dirname, 'img');
            if (!fs.existsSync(imgDir)) {
                fs.mkdirSync(imgDir, { recursive: true });
            }

            for (let i = 0; i < pageImgs.length; i++) {
                const imgUrl = pageImgs[i];
                // Keep the URL's real extension. The original
                // `i + '.' + 'png' || 'jpg'` always yielded "<i>.png"
                // because `+` binds tighter than `||`.
                const ext = imgUrl.split('.').pop() || 'png';
                // Offset by URLs already seen so names stay unique across pages.
                const name = (arr.length + i) + '.' + ext;
                request(imgUrl)
                    // Without an error handler, a failed download emits an
                    // unhandled 'error' event and crashes the process.
                    .on('error', (err) => console.log(`错误信息：${err}`))
                    .pipe(fs.createWriteStream(path.join(imgDir, name)));
                console.log("添加:" + imgUrl);
            }
            arr.push(...pageImgs);
        }
        done();
        console.log("添加:" + arr.length);
    }
});

//crawler.queue(url)

module.exports = (url) => {
    crawler.queue(url)
}
