const cheerio = require('cheerio')
const rewire = require('rewire')
const {sleep, uuid} = rewire('../common/utils.cjs')
const {composeAsync, collectParamsAndResult, parallelMap, logParams, errorRetry, whenNoError, parallelIter} = rewire('../common/combinator.cjs')
const {makeFetcher, header, baseURL, wrapCache, wrapProxy} = rewire('../common/fetch.cjs')
const R = require('ramda')
const {wrapFnState} = rewire('../common/fnstate.cjs')
const {getLogger} = rewire('../common/logger.cjs')
const {getDb, getCosClient} = rewire('../state.cjs')
const htmlParser = rewire('../common/htmlParser.cjs')
const dbutils = rewire('../common/dbutils.cjs')
const {downloadAndUpload} = rewire('../common/fileRemote.cjs')

// Shared logger for this spider, tagged with the site name.
const logger = getLogger('xiachufang')


// Browser-like User-Agent so the site serves normal desktop pages.
const USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0'

// Session cookie captured from a real browser visit — presumably needed
// to avoid anti-bot blocking; refresh it when requests start failing.
const SITE_COOKIE = 'bid=gSMDmyiz; __utmz=177678124.1691570653.1.1.utmcsr=bing|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22189d977bde4706-0589ac0fac69c2-26031c51-2073600-189d977bde57c2%22%2C%22%24device_id%22%3A%22189d977bde4706-0589ac0fac69c2-26031c51-2073600-189d977bde57c2%22%2C%22props%22%3A%7B%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; __utma=177678124.1998589192.1691570653.1691580136.1691743404.4; __utmc=177678124; __utmt=1; Hm_lvt_ecd4feb5c351cc02583045a5813b5142=1691570651,1691580136,1691743404; __gads=ID=f651ccc079d50239-2278302394e30076:T=1691570643:RT=1691743403:S=ALNI_MaMws2sch0BUh2729UT_XFGWiNgDg; __gpi=UID=00000c29a6eeaee6:T=1691570643:RT=1691743403:S=ALNI_MYieXG6VELqMAyvY6uo5MC_n43SZw; __utmb=177678124.24.10.1691743404; Hm_lpvt_ecd4feb5c351cc02583045a5813b5142=1691743467'

/**
 * Request headers sent with every fetch to xiachufang.com.
 * @returns {{['User-Agent']: string, Cookie: string}} fresh header object
 */
function getHeaders() {
    return {
        'User-Agent': USER_AGENT,
        "Cookie": SITE_COOKIE
    }
}

const domain = "https://www.xiachufang.com"

// Raw GET fetcher for the site: attach the browser headers and resolve
// relative URLs against the site domain.
const rawSiteGet = baseURL(header(makeFetcher('get'), getHeaders), domain)

// Route requests through a proxy. NOTE(review): the meaning of the second
// argument (60) is defined in fetch.cjs — presumably a pool size or
// rotation interval; confirm there.
const vedengFetch = wrapProxy(rawSiteGet, 60)

/**
 * Build a fetch function layered on top of vedengFetch.
 *
 * Layers (inner to outer): log request params → validate the response with
 * `validFn` → retry on error → cache per domain → collect params/result.
 *
 * @param validFn checks a raw response and throws when the page is bad
 * @returns async function(url) resolving to {url, fetchResult}
 */
function makeFetch(validFn) {
    // Log parameters, then run the caller-supplied validation.
    const validated = composeAsync(logParams(vedengFetch), validFn)

    // errorRetry(fn, 10, onError, 1000) — presumably up to 10 attempts
    // with a 1000ms delay, logging each failure; confirm in combinator.cjs.
    const retried = errorRetry(validated, 10, error => logger.error(error.message), 1000)

    // Cache results keyed under `domain`, sleeping 100ms after a real fetch.
    const cached = wrapCache(retried, domain, null, async _ => await sleep(100))

    // Expose the call's input/output as {url, fetchResult}.
    return collectParamsAndResult(cached, ['url', 'fetchResult'])
}

/**
 * List/category page fetcher; the single argument is a url and the result
 * is {url, fetchResult}. Responses are validated by validHtml (a hoisted
 * function declaration defined below).
 */
const fetch = makeFetch(validHtml)

/**
 * Sanity-check a fetched page: normal xiachufang pages contain the
 * "page-container" marker. Throws on anything else so the retry layer
 * re-requests broken or blocked responses.
 *
 * @param data response object whose `data` field holds the HTML string
 * @returns the same `data` object when the page looks valid
 * @throws Error when the marker is missing
 */
function validHtml(data) {
    if (data.data.includes("page-container")) {
        return data
    }
    throw new Error("页面不正常!!!")
}

/**
 * Fetch the category index page and return every category link on it,
 * dropping javascript: pseudo-links.
 * @returns {Promise<string[]>} category URLs/paths
 */
async function getAllCategory() {
    const categoryIndex = "https://www.xiachufang.com/category/"

    const {fetchResult} = await fetch(categoryIndex)
    const $ = cheerio.load(fetchResult.data)

    // Every anchor inside the category list.
    const links = htmlAttrs($, '.cates-list a', 'href')
    return links.filter(href => !href.includes('javascript:'))
}

/**
 * Validate a recipe detail-page response. Real detail pages and 404 pages
 * are both accepted (a 404 is a final answer, not a fetch failure);
 * anything else is thrown so the retry layer re-requests it.
 *
 * @param data response object whose `data` field holds the HTML string
 * @returns the same `data` object when acceptable
 * @throws Error when the page is neither a detail page nor a 404
 */
function checkDetail(data) {
    const html = data.data
    const looksLikeDetail = html.includes('container pos-r pb20 has-bottom-border')
    if (looksLikeDetail) {
        return data
    }
    const isNotFound = html.includes("404~~你访问的页面找不到")
    if (isNotFound) {
        logger.info("碰到404了")
        return data
    }
    // Dump the unexpected body to help diagnose blocks/captchas.
    logger.info(data.data)
    throw new Error("check detail fail!")
}


/**
 * 获取下一页的URL
 * @param  $ 
 * @returns 
 */
function nextPageUrl($) {
    const nextUrl = htmlAttrs($, '.pager a.next', 'href')
    if (nextUrl.length == 0) {
        return null;
    }
    return nextUrl[0]
}

/**
 * Collect every recipe-detail link on a parsed list page.
 * htmlAttrs already yields [] when nothing matches, so no extra
 * empty-check is needed.
 * @param $ cheerio document
 * @returns {string[]} detail-page hrefs (possibly empty)
 */
function getDetailUrls($) {
    return htmlAttrs($, '.normal-recipe-list li > div > a', 'href')
}

/**
 * Walk a list page and all of its following pages, collecting every
 * detail-page URL. Stops when a page yields no detail links or has no
 * "next" link. (Iterative form of the original page-by-page recursion.)
 *
 * @param {{fetchResult: object}} data as returned by fetch()
 * @returns {Promise<string[]>} detail URLs across all pages, in order
 */
async function parseAllDetailUrl({fetchResult}) {
    const collected = []
    let current = fetchResult

    while (true) {
        logger.info("正在解析详情URL")
        const $ = cheerio.load(current.data)

        const pageUrls = getDetailUrls($)
        if (pageUrls.length === 0) {
            break
        }
        collected.push(...pageUrls)

        const nextUrl = nextPageUrl($)
        if (nextUrl == null) {
            break
        }
        current = (await fetch(nextUrl)).fetchResult
    }

    return collected
}

/**
 * Collect one attribute from every element matching a selector.
 * @param $ cheerio document
 * @param css selector string
 * @param attr attribute name to read
 * @returns attribute values in document order (undefined entries when the
 *          attribute is missing on an element)
 */
function htmlAttrs($, css, attr) {
    const values = []
    $(css).each((_, elem) => {
        values.push($(elem).attr(attr))
    })
    return values
}

// Detail-page fetcher: fetch + checkDetail validation, then report the
// outcome — success logs '访问结束', failure logs the error (no rethrow).
const detailFetcher = makeFetch(checkDetail)
const reportOutcome = whenNoError(
    (_1, _2) => { logger.info('访问结束') },
    (error, _) => logger.info(error)
)
const fetchDetail = composeAsync(detailFetcher, reportOutcome)

// List pipeline: fetch a list page, walk all its pages to collect detail
// URLs, then fetch each detail page with 10-way parallelism.
// The rest-arg arrow is equivalent to R.partial(parallelMap, [10, fetchDetail]).
const fetchList = composeAsync(
    fetch,
    parseAllDetailUrl,
    (...rest) => parallelMap(10, fetchDetail, ...rest)
)


/**
 * Parse the raw HTML previously crawled into `spider_log` and persist the
 * extracted recipe fields into the `cookbook` table.
 *
 * Pages through spider_log 1000 rows at a time; for each batch, cookbook
 * rows with the same URLs are deleted first so a re-run replaces them
 * instead of duplicating.
 */
async function parseData() {
    // Extract one cookbook record from a spider_log row.
    const parseItem = item => {
        const html = item.html

        const parseRule = htmlParser.composeO({
            'title': htmlParser.textBy('h1.page-title'),
            'desc': htmlParser.textBy('.recipe-show .desc'),
            'cover_img': htmlParser.attrBy('div.cover.image img', 'src'),
            'author_source_id': htmlParser.attrBy('.author a', 'href'),
            'author_name': htmlParser.attrBy('.author a', 'title'),
            'composition': htmlParser.mapListBy('.ings tr', htmlParser.composeL(htmlParser.textBy('td.name'), htmlParser.textBy('td.unit'))),
            'step_title': htmlParser.textBy('h2#steps'),
            'step_list': htmlParser.mapListBy('.steps .container', htmlParser.composeO({text: htmlParser.textBy('.text'), img: htmlParser.attrBy('img', 'src')}))
        })

        const result = htmlParser.parse(html, parseRule)
        // Nested structures are serialized to JSON for the text columns.
        return {
            id: uuid(),
            url: item.url,
            spider_id: item.id,
            title: result.title,
            desc: result.desc,
            cover_img: result.cover_img,
            author_source_id: result.author_source_id,
            author_name: result.author_name,
            composition: JSON.stringify(result.composition),
            step_title: result.step_title,
            step_list: JSON.stringify(result.step_list),
        }
    }

    let page = 0
    while (true) {
        // Use the shared logger (not console.log) for consistency with the
        // rest of this file.
        logger.info("parse page: " + page)
        // `page` is an internal integer, so string-built LIMIT is safe here.
        const sql = "select * from spider_log where `domain` = 'https://www.xiachufang.com' and `url` like '%/recipe/%%' and `html` <> '' limit " + (page * 1000) + ", 1000"
        page++
        const data = await getDb().sql(sql).execute()
        if (R.isEmpty(data)) {
            break
        }

        // Delete existing rows for these URLs so re-runs overwrite them.
        const urls = R.map(item => item['url'], data)
        await getDb()
            .sql('delete from cookbook where `url` in (?)')
            .params([ urls ])
            .execute();

        const toSaveData = R.map(parseItem, data)
        await dbutils.insertAll(getDb(), 'cookbook', toSaveData)
    }
}

/**
 * Download every recipe image (cover + step images) referenced by the
 * `cookbook` table, upload them to COS deep archive, and record the
 * source-url → cos-path mapping in `xiachufang_media`.
 */
async function handleMedia() {
    // Process one page (1000 rows) of cookbook records.
    async function oneTask(page) {
        logger.info("处理page: " + page)
        const sql = "select * from cookbook limit " + (page * 1000) + ", 1000"
        const data = await getDb().sql(sql).execute()

        if (R.isEmpty(data)) {
            return
        }
        // Collect every image URL per row: the cover plus each step image.
        let imgUrls = R.map(item => {
            const coverUrls = item.cover_img ? [item.cover_img] : []

            const stepList = JSON.parse(item.step_list)
            if (R.isEmpty(stepList)) {
                return coverUrls
            }
            const stepUrls = R.map(info => info.img, stepList)
            return R.concat(coverUrls, stepUrls)
        }, data)

        imgUrls = R.flatten(imgUrls)

        // BUGFIX: R.isEmpty(undefined) is false, so steps without an <img>
        // previously slipped through this filter and crashed url.split()
        // below. Reject nil values explicitly as well as empty strings.
        imgUrls = R.filter(url => !R.isNil(url) && !R.isEmpty(url), imgUrls)

        // Normalize: strip query strings and surrounding whitespace.
        imgUrls = R.map(url => url.split("?")[0].trim(), imgUrls)

        // Keep only URLs not already recorded in xiachufang_media.
        imgUrls = await dbutils.notInTable(getDb(), 'xiachufang_media', 'source_url', imgUrls)

        // Shared logger instead of console.log, consistent with the file.
        logger.info("num: " + imgUrls.length)

        // Retry each image twice; failures are logged but do not abort the page.
        const handleFn = whenNoError(
            errorRetry(handleImg, 2),
            (_1, _2) => {},
            (e, _) => logger.error(e)
        )
        await parallelIter(12, handleFn, imgUrls)
    }

    // Download one image, push it to COS, and record the mapping row.
    async function handleImg(url) {
        logger.info("正在处理: " + url)

        const cos = getCosClient()
        const bucket = 'cookbook-1308882374'
        const region = 'ap-shanghai'
        const path = await downloadAndUpload(url, 'images-deep', cos, bucket, region, 'DEEP_ARCHIVE')

        const info = {
            source_url: url,
            cos_path: path,
            ctime: Date.now()
        }
        await getDb().insert("xiachufang_media", info).execute()
    }

    const sql = "select count(1) as cnt from cookbook"
    let totalRecord = await getDb().sql(sql).execute()
    totalRecord = totalRecord[0]['cnt']
    // floor(n/1000)+1 over-counts by one page when n is an exact multiple
    // of 1000; harmless — the extra page is empty and oneTask returns early.
    const totalPage = Math.floor(totalRecord / 1000) + 1
    const fn = wrapFnState("xiachufang_imgtask", 1, oneTask, (e, _) => logger.error(e))
    await fn(R.range(0, totalPage))
}

/**
 * Pipeline orchestrator. The earlier stages (category discovery, list
 * crawling, data parsing) are kept commented out below as manual toggles;
 * uncomment the stage you want to run. Currently only the media download
 * stage (handleMedia) is active.
 */
async function main2() {
    // Stage: list all category URLs.
    // const urls = await getAllCategory()
    // logger.info(urls)

    // Stage: collect detail URLs for a single category (manual test).
    // const getDetailList = composeAsync(fetch, parseAllDetailUrl)
    // const urls = await getDetailList('/category/52333/')
    // logger.info(urls.length)

    // Stage: crawl every category's list pages sequentially (manual test).
    // const urls = await getAllCategory()
    // for (let index = 0; index < urls.length; index++) {
    //     const url = urls[index]
    //     await fetchList(url)
    // }

    // Stage: run the main crawl with resumable state tracking.
    // const urls = await getAllCategory()
    // const fn = wrapFnState("xiachufang", 1, fetchList, (e, item) => logger.error(e))
    // await fn(urls)

    // Stage: parse crawled HTML into the cookbook table.
    // await parseData()

    await handleMedia()

    logger.info("任务结束!!!!撒花")
}



// Entry point. Catch rejections explicitly so a failure is logged instead
// of dying as an unhandled promise rejection (fatal on modern Node).
main2().catch(e => logger.error(e))