/*
 * @Version: 0.0.1
 * @Author: ider
 * @Date: 2020-05-09 13:32:52
 * @LastEditors: ider
 * @LastEditTime: 2021-08-05 21:34:19
 * @Description: 重新解析已存在的文件 (re-parse already-downloaded HTML files)
 */


const cheerio = require('cheerio')
const Apify = require('apify')
// Grab the SDK's shared logger and turn on DEBUG-level output for the run.
const { log } = Apify.utils;
log.setLevel(log.LEVELS.DEBUG);

/**
 * Parse one CPU-detail HTML page and pull out the CPU name plus the
 * "Physical" / "Performance" / "Architecture" / "Cores" spec tables.
 *
 * @param {string} html - Raw HTML of a CPU detail page.
 * @returns {Object} `{ name, [section]: { [label]: value } }`. `name` is the
 *   trimmed text of `.cpuname` ("" when the element is absent); each kept
 *   section maps row labels (`<th>` text, colons stripped) to `<td>` text.
 */
const extract = (html) => {
    const $ = cheerio.load(html);
    // Only these section headings are extracted; everything else is skipped.
    const WANTED_SECTIONS = new Set(['Physical', 'Performance', 'Architecture', 'Cores']);
    const retDict = {};
    retDict.name = $('.cpuname').text().trim();
    $('.sectioncontainer  section.details').each((_, section) => {
        const heading = $(section).find('h1').text().trim();
        if (!WANTED_SECTIONS.has(heading)) return;
        retDict[heading] = {};
        $(section).find('table tr').each((_, row) => {
            // Row label lives in <th> (drop trailing ':'), value in <td>.
            const key = $(row).find('th').text().replace(/:/g, '').trim();
            const value = $(row).find('td').text().trim();
            retDict[heading][key] = value;
        });
    });
    return retDict;
};


Apify.main(async () => {
    // Walk every stored HTML page in the 'cpu_detail_html' key-value store,
    // re-run extraction on it, and write the parsed result into the
    // 're_cpu_detail' store under the same key.
    const htmlStore = await Apify.openKeyValueStore('cpu_detail_html');
    const reCpuDetailStore = await Apify.openKeyValueStore('re_cpu_detail');

    await htmlStore.forEachKey(async (key) => {
        const html = await htmlStore.getValue(key);
        const retDict = extract(html);
        if (retDict.name !== '') {
            await reCpuDetailStore.setValue(key, retDict);
        } else {
            // An empty name means extraction failed for this page;
            // log the key so the page can be inspected manually.
            console.log(key);
        }
    });

    console.log('Crawler finished.');
});