let path = require('path');
let url = require('url');
let https = require('https');
let fs = require('fs');
let iconv = require('iconv-lite');
let cheerio = require('cheerio');


// Fetch `crawUrl` over HTTPS (GET) and resolve with the raw response body
// as a Buffer. Used both for HTML pages and for image downloads.
// Rejects on request errors (DNS/connect), response stream errors, or a
// malformed URL.
function download(crawUrl){
    return new Promise((resolve, reject)=>{
        // WHATWG URL instead of the deprecated legacy url.parse(); parsed
        // inside the executor so an invalid URL rejects instead of throwing
        // synchronously out of download().
        const urlParsed = new URL(crawUrl);
        let req = https.request({
          method: 'GET',
          hostname: urlParsed.hostname,
          // url.parse's `path` included the query string; pathname + search
          // reproduces that with the WHATWG API.
          path: urlParsed.pathname + urlParsed.search,
          headers: {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
          }
        }, function (res) {
            var arr = [];
            res.on("data", function (chunk) {
                arr.push(chunk);
            });

            res.on('error', (e) => {
                reject(e);
            })

            res.on("end", function () {
                var value = Buffer.concat(arr);
                resolve(value);
            });
        });
        // fix: without this handler a connection/DNS failure emits an
        // unhandled 'error' event and crashes the process instead of
        // rejecting the promise.
        req.on('error', reject);
        req.end();
      });
}

// URL manager: tracks the pending-URL queue and the set of URLs already
// handed out for crawling, and de-duplicates additions across both.
class UrlManager {
    constructor(){
      this.urls = [];      // pending queue (LIFO: getNewUrl pops from the end)
      this.crawedUrl = []; // URLs already handed out for crawling
    }

    // True while at least one URL is waiting to be crawled.
    hasNewUrl(){
        return this.urls.length >= 1;
    }

    isCrawed(url){
        return this.crawedUrl.includes(url);
    }

    isExist(url){
        return this.urls.includes(url);
    }

    // Queue a single URL unless it is falsy, already queued, or already
    // crawled. Always fires onAddNewUrl so the crawler can re-check its pool.
    addNewUrl(newUrl){
      if(!newUrl){
        return;
      }
      if(!this.isCrawed(newUrl) && !this.isExist(newUrl)){
        this.urls.push(newUrl);
      }

      this.onAddNewUrl && this.onAddNewUrl();
    }

    addNewUrls(urls){
      if(!urls || urls.length === 0){
        return;
      }
      for(const u of urls){
        this.addNewUrl(u);
      }
    }

    // Pop the next URL and record it as crawled; undefined when empty.
    getNewUrl(){
        // fix: the original pushed `undefined` into crawedUrl when the
        // queue was empty.
        if(!this.hasNewUrl()){
          return undefined;
        }
        const url = this.urls.pop();
        this.crawedUrl.push(url);
        return url;
    }
}
  
// Page parsing: extract new post links (and, below, image info).
// Returns the hrefs in the author-posts widget that look like post
// permalinks: absolute https://acg12.com/<digits>/ or relative /<digits>/.
  const get_new_urls = ($) => {

    const reg = /^(https:\/\/acg12\.com)?\/(\d+)\/$/;

    const links = $("div.poi-g_lg-1-4 div.inn-widget__author-posts__container a")
        .toArray()
        // fix: attr("href") is undefined for anchors without an href, and
        // the original called .trim() on it, throwing a TypeError.
        .map(ele => ($(ele).attr("href") || '').trim())
        .filter(text => reg.test(text));
    return links;
  }
  
  // Extract image records from a post page. Each record has picUrl (with a
  // '.jpg' suffix guaranteed), mmId, and title. `urlAddress` is currently
  // unused but kept for interface compatibility with get_new_urls.
  const get_new_data = ($, urlAddress) => {

    let pic_nodes = $('div.inn-singular__post__body > div.inn-singular__post__body__content > div > a > img');
    let res_datas = [];
    for(let i = 0; i < pic_nodes.length; i++){
      // fix: reuse the already-queried selection instead of re-running the
      // selector on every iteration.
      let pic_node = pic_nodes.eq(i);
      if(!pic_node || !pic_node.attr("src")){
          // fix: the original `return []` here discarded every record
          // collected so far because of one bad node; skip it instead.
          continue;
      }

      var res_data = {};
      var src = pic_node.attr("src");
      // Some srcs come without an extension; normalize to .jpg.
      if(src.lastIndexOf('.jpg') === -1){
        src += '.jpg';
      }
      console.log(src);
      res_data['picUrl'] = src;

      // NOTE(review): slice(41) assumes a fixed-length URL prefix before the
      // image id — TODO confirm against the real CDN URLs.
      res_data['mmId'] = src.slice(41, src.indexOf(".jpg"));
      res_data['title'] = src.slice(41);

      res_datas.push(res_data);
    }
    return res_datas;
  }
  
  // Parse one downloaded page: load it into cheerio, then collect both the
  // newly discovered post links and the image records. Returns null when
  // there is no content to parse.
  const parse = (html_cont, url) => {
    if (!html_cont) {
      return null;
    }

    const $ = cheerio.load(html_cont);
    const newUrls = get_new_urls($, url);
    const dataList = get_new_data($, url);

    return { dataList, newUrls };
  }
  
  // Downloads each collected image via its link and writes it (plus a
  // description file and a JSON index) under <cwd>/dist/images.
  class ResultOutput {

    constructor() {
      this.results = [];
      // fix: one recursive mkdir replaces the original two-step
      // existsSync/mkdirSync dance for ./dist and ./dist/images.
      this.distPath = path.resolve(process.cwd(), './dist/images');
      fs.mkdirSync(this.distPath, { recursive: true });
    }

    // Append parsed image records (picUrl/mmId/title objects).
    addResults(resultsArg) {
      this.results = [...this.results, ...resultsArg];
    }

    // Write one image buffer plus a desc.txt into dist/images/<mmId>/.
    saveImgToFile(res, buffer) {
      const imgDist = path.resolve(this.distPath, res.mmId);
      fs.mkdirSync(imgDist, { recursive: true }); // no-op if it already exists
      // path.basename() keeps only the file name portion of the URL.
      const imgFileName = path.resolve(imgDist, path.basename(res.picUrl));
      const descFileName = path.resolve(imgDist, 'desc.txt');
      fs.writeFileSync(descFileName, res.title); // synchronous write
      fs.writeFileSync(imgFileName, buffer);
    }

    // Download the images one at a time (sequential recursion), then write
    // an imgListInfo.json index. Returns a promise for the whole run.
    startDownload() {
      console.log(this.results);

      const fetchImg = (urlInfoList, index = 0) => {
        const urlInfo = urlInfoList[index];
        if (urlInfo) {
          return download(urlInfo.picUrl)
            .then(buffer => {
              console.log('Progress: ' + index + '/' + urlInfoList.length);
              this.saveImgToFile(urlInfo, buffer);
              return fetchImg(urlInfoList, index + 1);
            })
        } else {
          return Promise.resolve();
        }
      }

      return fetchImg(this.results).then(()=>{
        const imgListInfo = this.results.map(item => {
          return {
            title: item.title,
            // NOTE(review): `desc` is never set by the parser, so this is
            // always undefined and dropped by JSON.stringify — TODO confirm.
            desc: item.desc,
            fileName: `./images/${item.mmId}/${path.basename(item.picUrl)}`,
          };
        });
        // Written under dist/images (this.distPath), matching the original.
        const imgListInfoFileName = path.resolve(this.distPath, 'imgListInfo.json');
        fs.writeFileSync(imgListInfoFileName, JSON.stringify(imgListInfo, null, 2));
      });
    }
  }
  
  const PROMISE_RUN_NUM = 5;  // max number of page downloads in flight at once
  const CRAW_MAX_COUNT = 13;  // highest crawl index allowed (inclusive; indices start at 0)

  // Orchestrates the crawl: pulls URLs from UrlManager, downloads pages in a
  // bounded pool, parses them for image data and new links, and hands the
  // results to ResultOutput.
  class Crawler {

    constructor({rootUrl} = {}){
      this.rootUrl = rootUrl;
      this.urlManager = new UrlManager();
      this.urlManager.addNewUrl(rootUrl);
      this.resultOutput = new ResultOutput();
      // Fixed-size pool of in-flight download promises (a slot is null/empty
      // when free).
      this.runningPromisePool = [];

      this.crawIndex = 0;

      this.craw = this.craw.bind(this);
      this.waitAllPromiseOver = this.waitAllPromiseOver.bind(this);

      // Whenever a new URL is queued, try to fill free pool slots again.
      this.urlManager.onAddNewUrl = this.craw;
    }

    // Only the occupied (truthy) slots of the promise pool.
    getRunningPromise(){
      return this.runningPromisePool.filter(item => !!item);
    }

    // Fill free pool slots with page downloads while URLs remain and the
    // page budget (CRAW_MAX_COUNT) is not exhausted.
    craw(){

      for(let i=0; i<PROMISE_RUN_NUM; i++){
        if(this.runningPromisePool[i]) {
          continue; // slot busy
        }
        if(this.urlManager.hasNewUrl() && this.crawIndex <= CRAW_MAX_COUNT){
          let newUrl = this.urlManager.getNewUrl();
          let index = this.crawIndex++;
          console.log(`Crawing ${index}: ${newUrl}`);
          this.runningPromisePool[i] = download(newUrl)
            .then(data => {
              console.log(`Crawed ${index}`);
              const { dataList, newUrls } = parse(iconv.decode(data, 'utf-8'), newUrl) || { dataList: [], newUrls: []};
              this.resultOutput.addResults(dataList);
              // May synchronously re-enter craw() via onAddNewUrl.
              this.urlManager.addNewUrls(newUrls);
            })
            .then(()=>{
              this.runningPromisePool[i] = null;
            },(err)=>{
              // A failed page is logged and its slot freed; the crawl goes on.
              this.runningPromisePool[i] = null;
              console.log(err);
            });
        }else{
          break;
        }
      }
    }

    // Resolves once the pool is completely drained. Re-checks after every
    // Promise.all round because settled downloads may have queued new work.
    waitAllPromiseOver(){
      const runningPromise = this.getRunningPromise();
      if(runningPromise.length){
        return Promise.all(runningPromise).then(this.waitAllPromiseOver);
      }else{
        return Promise.resolve();
      }
    }

    // Kick off the crawl and resolve when no downloads remain in flight.
    startCrawler(){
      console.log('Start Craw:');
      this.craw();
      return this.waitAllPromiseOver();
    }

    startDownload(){
      // fix: return the promise so the caller's .then/.catch chain actually
      // tracks the download phase (the original dropped it, letting errors
      // escape the top-level .catch).
      return this.resultOutput.startDownload();
    }
  }
  
// Entry point: crawl starting from the seed page, then download every image
// that was collected. Errors from either phase land in the final catch.
const rootSeedUrl = 'https://acg12.com/311113/';

const crawler = new Crawler({ rootUrl: rootSeedUrl });

crawler
  .startCrawler()
  .then(() => {
    console.log('爬虫获取链接完成');
    console.log('爬虫开始下载图片');
    return crawler.startDownload();
  })
  .catch((err) => {
    console.log(err);
  });
