const request = require('superagent')
require('superagent-charset')(request)
const cheerio = require('cheerio')
const download = require('download')
const mkdirp = require('mkdirp')
const async = require('async')
const URL = require("url")

// Directory where downloaded images are saved (one subdirectory per gallery title).
const savePath = './img/';
// Matches absolute http(s) URLs — used to decide whether an <img src> still needs
// to be resolved against the page URL. Kept as a plain regex literal: wrapping a
// literal in `new RegExp(...)` (as the original did) is redundant.
// NOTE(review): the nested quantifiers make this pattern ReDoS-prone on hostile
// input; acceptable here since it only sees scraped src attributes.
const UrlReg = /^https?:\/\/(([a-zA-Z0-9_-])+(\.)?)*(:\d+)?(\/((\.)?(\?)?=?&?[a-zA-Z0-9_-](\?)?)*)*$/i;
// Queue of listing-page links discovered but not yet crawled.
const globalLinkQueue = [];
// Number of entry-page crawls started so far.
let spiderCount = 0;
// Maximum number of entry-page crawls.
const spiderCountLimit = 2;

spider('https://www.tupianzj.com/meinv/xiezhen/')

// A link is worth following only if it points at some non-blank *.html target.
function isValidHref(href) {
	const htmlLink = /\S+\.html/;
	return htmlLink.test(href);
}

// Collect links from the anchors matched by `selector` into `linksAry`.
// Relative hrefs are resolved against `link`; non-.html targets and duplicates
// are skipped. When `title` is supplied it overrides each anchor's own title
// (pages of the same gallery share one category title).
function fetchLinks({ $, selector, linksAry, title, link }) {
	let aTags = $(selector), $aTag, aHref, aTitle;
	let { domain, hrefPrev } = getHrefInfo(link)

	for (let i = 0; i < aTags.length; i++) {
		$aTag = $(aTags[i]);
		aHref = $aTag.attr('href');
		aTitle = title || $aTag.attr('title');

		// Skip anchors with no href at all instead of building ".../undefined" URLs.
		if (aHref == null) {
			continue;
		}

		// BUGFIX: hrefs that are already absolute used to be prefixed with
		// `hrefPrev`, yielding broken URLs like "https://site/dir/https://other/x.html".
		// Only root-relative and document-relative hrefs need resolving.
		if (!/^https?:\/\//i.test(aHref)) {
			if (aHref.startsWith('/')) {
				aHref = domain + aHref
			} else {
				aHref = hrefPrev + aHref
			}
		}

		// Filter out non-.html targets and links already collected.
		if (!isValidHref(aHref) || linksAry.some(d => d.href == aHref)) {
			continue;
		}

		linksAry.push({
			title: aTitle,
			href: aHref
		})
	}
}

// Derive, from a page URL, the site origin (`domain`) and the directory-style
// prefix (`hrefPrev`, always ending in '/') used to resolve relative hrefs.
function getHrefInfo(href) {
	const parsed = URL.parse(href);
	const segments = href.split('/');
	segments.pop(); // drop the last path segment (file name or trailing empty string)
	return {
		hrefPrev: segments.join('/') + '/',
		domain: `${parsed.protocol}//${parsed.hostname}`
	};
}
// GET `url` and resolve with the superagent response (gbk-decoded, buffered).
// The fixed Cookie/User-Agent headers make the request look like a normal
// browser session to the target site. Rejects with the transport error.
function req(url) {
	return new Promise((resolve, reject) => {
		request('GET', url)
			.charset('gbk')
			.buffer(true)
			.set('Cookie', 't=cdd3345174aba64147a802b43a0999e6; r=8221; Hm_lvt_f5329ae3e00629a7bb8ad78d0efb7273=1667359683,1667462127,1667525488; Hm_lpvt_f5329ae3e00629a7bb8ad78d0efb7273=1667538578')
			.set('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36')
			.end((err, res) => {
				if (err) {
					reject(err);
					return;
				}
				resolve(res);
			});
	});
}

// Crawl one listing page: collect gallery links, then each gallery's pagination
// links, then download the single image found on every page. Listing-style links
// (no title) are queued in `globalLinkQueue` and crawled on the next pass, up to
// `spiderCountLimit` passes.
function spider(fetch_link) {
	console.log(`开始第${++spiderCount}次入口爬虫...`)
	req(fetch_link).then(res => {
		// Stage 1: gallery links from the listing page.
		let linksAry = [];
		fetchLinks({
			$: cheerio.load(res.text),
			selector: '.list_con_box ul>li>a[href]',
			linksAry: linksAry,
			link: fetch_link
		});
		console.log('第一次抓取到的链接：\n', linksAry)

		// Pop the next queued listing page, respecting the crawl-depth limit.
		function scheduleNext() {
			let next = globalLinkQueue.shift()
			if (next && spiderCount <= spiderCountLimit) {
				spider(next)
			}
		}

		// Stage 2: visit each gallery page sequentially, collect its pagination links.
		async.mapLimit(linksAry, 1, async function ({ href, title }) {
			// A title marks a gallery page whose images we want.
			if (title) {
				// BUGFIX: mkdirp returns a Promise; the original fired and forgot,
				// racing directory creation against the downloads below.
				await mkdirp(savePath + title);
				let pageLinks = [];
				let { text } = await req(href);
				fetchLinks({
					$: cheerio.load(text),
					selector: '.pages li>a',
					linksAry: pageLinks,
					link: href,
					title: title, // pagination pages belong to the same gallery; reuse its title
				});

				// The gallery's own first page also carries an image.
				pageLinks.unshift({
					title: title,
					href: href
				});

				return pageLinks;
			}

			// No title: a listing-style link — queue it for a later spider() pass.
			if (!globalLinkQueue.includes(href)) {
				globalLinkQueue.push(href)
			}
		}, (err, collected) => {
			// BUGFIX: the original ignored `err`; on any rejection `collected` is
			// undefined and `.filter` crashed. Log, then still drain the queue.
			if (err) {
				console.log('错误信息=》', err)
				scheduleNext()
				return
			}
			let links = collected.filter(d => d != undefined).flat();
			let count = 0;
			let allCount = links.length;
			console.log('第二次抓取到的链接：\n', links);

			// Stage 3: download the image on each page, one at a time.
			async.mapLimit(links, 1, async function ({ href, title }) {
				// BUGFIX: a single failed request/download used to reject the whole
				// mapLimit run and abort the remaining downloads; now it is logged
				// and skipped so the batch keeps going.
				try {
					let { text } = await req(href);
					let $ = cheerio.load(text);
					let imgSrc = $('.pic_tupian img').attr('src');
					let { domain, hrefPrev } = getHrefInfo(href);
					// Resolve relative image sources against the page URL.
					if (!UrlReg.test(imgSrc)) {
						if (String(imgSrc).startsWith('/')) {
							imgSrc = domain + imgSrc
						} else {
							imgSrc = hrefPrev + imgSrc
						}
					}
					await download(imgSrc, savePath + title);
					console.log(`当前图片下载进度:${++count}/${allCount}\n`)
				} catch (e) {
					console.log('错误信息=》', e)
				}
			}, scheduleNext)
		})
	}).catch(err => {
		console.log('错误信息=》', err)
	})
}

