# -*- coding: utf-8 -*-
# coding=utf-8
# ===============================
# Copyright (c) 2025 2578713815@qq.com | FlashVolador[https://gitee.com/FlashVolador]
# Licensed under the MIT License.
# ===============================
"""
爬虫应用--抓取公网图片;

程序概述：
采用控制字符界面
爬取百度网站上的相关主题图片, 并能按照关键词检索图片并下载到本地保存;

开发环境:
解释器: python3.8.10
操作系统: Windows11
IDE: pycharm professional 2024.1;

版本号2.1.3.0;

作者: @FlashVolador
邮箱: 2578713815@qq.com
"""

import json
import os
import random
import re
import sys
import time
from typing import Union

import requests
from anti_useragent import AntiUserAgent  # provides random, usable User-Agent strings


urls = {
	'index': 'https://images.baidu.com/',
	'search': 'https://images.baidu.com/search/acjson',
}
# URL pool: site index (used for cookie harvesting) and the lazy-load search endpoint

t1 = 0.001
t10 = 0.01
t100 = 0.1
t1000 = 1
# Speed-tuning delays in seconds; call sites multiply these by small factors


def get_headers() -> dict:
	"""
	Build request headers that disguise the crawler as a browser.

	Makes up to five attempts to fetch the site index and harvest its
	session cookies; if no attempt returns HTTP 200, the plain headers
	(without a ``Cookie`` entry) are returned instead. The caller should
	keep the returned headers for the whole crawl rather than re-fetching.

	:return: header dict, with a ``Cookie`` entry when one was obtained
	:raises SystemExit: when the network is unreachable (after a prompt)
	"""
	headers = {}
	for _ in range(5):
		# Fresh random UA per attempt.
		headers = {
			'User-Agent': AntiUserAgent().random,
			'Accept-Encoding': 'gzip, deflate, br, zstd',
			'Accept-Language': 'zh-CN,zh;q=0.9,en-GB;q=0.8,en;q=0.7,en-US;q=0.6',
			'Connection': 'keep-alive',
			'Host': 'images.baidu.com',
			'Referer': 'https://images.baidu.com/',
		}
		try:
			response = requests.get(urls['index'], headers=headers, timeout=32)
		except requests.RequestException:
			# No connectivity at all: tell the user, then exit cleanly.
			input('>>>Please check the network.ENTER TO EXIT.')
			sys.exit(0)
		if response.status_code == 200:
			# Server responded: fold the returned cookies into the headers.
			cookies_str = ';'.join(f'{cookie.name}={cookie.value}' for cookie in response.cookies)
			headers.update({'Cookie': cookies_str})
			return headers
	print('>>>Cookie request failed: The default header will be enabled.')
	return headers


def get_params(word: str, pn: int) -> dict:
	"""
	Assemble the query-string parameters for one lazy-load search request.

	The result page streams images in batches of 30, so ``pn`` selects
	which batch (page) to fetch. ``gsm`` is an anti-leech token; the
	caller refreshes it between consecutive lazy-load requests.

	:param word: the keyword typed by the user
	:param pn: zero-based page index controlling how much content is loaded
	:return: dict of GET parameters
	"""
	batch = 30  # the site delivers images 30 at a time
	return {
		"tn": "resultjson_com",
		"ipn": "rj",
		"ct": "201326592",
		"fp": "result",
		"word": word,
		"queryWord": word,
		"cl": "2",
		"lm": "-1",
		"ie": "utf-8",
		"oe": "utf-8",
		"st": "-1",
		"ic": "0",
		"face": "0",
		"istype": "2",
		"nc": "1",
		"pn": str(batch * pn),
		"rn": str(batch),
		"gsm": "1e",
	}


def get_html(url: str, headers: dict, params: dict) -> Union[requests.Response, bool]:
	"""
	Issue a GET request and return the response on success.

	Note: the original annotation ``requests.Response or bool`` evaluated
	to just ``requests.Response`` at runtime; ``Union`` states the real
	contract.

	:param url: target URL
	:param headers: request headers
	:param params: query-string parameters
	:return: the ``Response`` on HTTP 200, ``False`` on any other status
	:raises requests.RequestException: on network failure or timeout
	"""
	response = requests.get(url, headers=headers, params=params, timeout=280)
	# Any status other than 200 is reported to the caller as False.
	if response.status_code == 200:
		return response
	return False


def add_json(json_path: str, json_data: dict) -> None:
	"""
	Merge ``json_data`` into the JSON file at ``json_path``.

	The file must already exist and contain a JSON object; its existing
	entries are kept and the new ones are layered on top.

	:param json_path: path of the JSON file that stores image URLs
	:param json_data: dict of image-resource URLs to merge in
	:return: None
	"""
	print('>>>Saving json file...')
	with open(json_path, 'r', encoding='utf-8') as fp:
		merged = json.load(fp)
	merged.update(json_data)
	with open(json_path, 'w', encoding='utf-8') as fp:
		json.dump(merged, fp, ensure_ascii=False, indent=4)


def make_download_file(image_num: int, headers: dict, select_word: str) -> bool:
	"""
	Build the URL-queue files for one crawl.

	Repeatedly requests lazy-load batches (30 links each) from the search
	endpoint, skips links already recorded in LOG.txt, and once enough new
	links are collected (or the site runs out of data) writes them to the
	keyword's cumulative JSON queue plus a temporary ``...00x00.json``
	queue that download_file() consumes and deletes.

	:param image_num: number of images the user asked for
	:param headers: request headers used for every search request
	:param select_word: the keyword (also names the output directory)
	:return: True when a queue file was produced, False when access was denied
	"""
	log_info_list = read_log(f'./ImageDirs/{select_word}/LOG.txt')
	params = get_params(word=select_word, pn=0)
	num = 0  # how many links have been queued so far
	links = {}
	new_log_list = []
	# Keep the regex patterns separate from their match results: the
	# original reused one name for both, so the gsm pattern was clobbered
	# by its first match and later searches used a stale token as a regex.
	gsm_pattern = '"gsm":"(.*?)",'
	thumb_pattern = '"thumbURL":"(.*?)",'
	page = 1
	while num <= image_num:
		html = get_html(url=urls['search'], headers=headers, params=params)
		if not html:
			# get_html returned False (non-200): the server denied access.
			# (The original dereferenced html.status_code here and crashed.)
			input('>>>[NOT OK]The server denies access!')
			return False
		print(f'>>>[{html.status_code} OK]Retrieving image link...')
		text = html.text.replace(' ', '')
		link_list = re.findall(thumb_pattern, text)
		if link_list:
			# gsm is an anti-leech token that must be echoed back on the
			# next lazy-load request, together with the next batch offset.
			gsm = re.findall(gsm_pattern, text)[0]
			params.update({"gsm": gsm})
			params.update({"pn": f"{30 * page}"})
			for link in link_list:
				try:
					# link[28:38] is assumed to be a unique serial slice of the
					# thumbnail URL; it doubles as the dedup key stored in LOG.txt.
					serial = link[28:38]
					if serial not in log_info_list:
						key = select_word + serial + '.JPEG'
						links.update({key: link})  # queue the link for saving
						new_log_list.append(serial)  # remember the serial for the log
						num += 1
						print(f">>>Retrieve resources link: {select_word}{serial}.JPEG" + ':' + f"{link}")
						time.sleep(t100 * 2)
					else:
						print('>>>The link already exists.CONTINUE...')
				except Exception:
					print('>>>This ontology search is complete.CONTINUE...')
				if num >= image_num:
					# Enough links queued: stop scanning this batch.
					break
		else:
			print('>>>There  no more data available.')

		if num >= image_num or len(link_list) == 0:
			# Enough links (or no more data): persist the queues and the log.
			add_json(f'./ImageDirs/{select_word}/{select_word}.json', links)
			# Cumulative record of every resource link fetched for this keyword.
			with open(f'./ImageDirs/{select_word}/{select_word}00x00.json', 'w', encoding='utf-8') as f:
				f.write(json.dumps(links, ensure_ascii=False, indent=4))
			# Temporary queue file: consumed by download_file(), deleted afterwards.
			print('>>>Saving log info...')
			for info in new_log_list:
				make_log(info)
			print('-' * 50)
			return True
		page += 1


def make_log(info: str, word: str = None) -> None:
	"""
	Append one line of log information to the keyword's LOG.txt.

	:param info: the log entry to persist (one serial number per line)
	:param word: keyword directory to write into; defaults to the
		module-level ``select_word`` set by the main loop, which keeps
		existing one-argument callers working
	:return: None
	"""
	if word is None:
		# NOTE(review): the original silently relied on the global
		# ``select_word``; the optional parameter makes that explicit.
		word = select_word
	with open(f'./ImageDirs/{word}/LOG.txt', 'a', encoding='utf-8') as f:
		f.write(info + '\n')


def download_file(select_word: str, file_path: str) -> bool:
	"""
	Download every image listed in a URL-queue JSON file.

	:param select_word: the keyword, used to build the target directory
	:param file_path: path of the URL-queue file (JSON: filename -> URL)
	:return: True once the whole queue has been processed
	"""
	with open(file_path, 'r', encoding='utf-8') as f:
		url_data = json.load(f)  # mapping of target filename -> image URL
	for name, url in url_data.items():
		# A minimal per-request header works here; the full cookie-bearing
		# headers from get_headers() get rejected with 403 by the image host.
		headers = {
			'User-Agent': AntiUserAgent().random,
		}
		response = get_html(url, headers, params={})
		# get_html returns False for any non-200 status, so guard on
		# truthiness: the original dereferenced .status_code on False
		# and crashed with AttributeError.
		if not response:
			print('>>>Error: download request failed!')
			continue
		with open(f'./ImageDirs/{select_word}/Images/{name}', 'wb') as img:
			img.write(response.content)
		print(f'Downloading...: {name}--{url}')
		# Random pause to dodge request-frequency anti-crawling measures.
		time.sleep(t100 * random.randrange(1, 5))
	return True


def make_setting_file() -> bool:
	"""
	Create ./SETTINGS.json with its default content if it is missing.

	The file holds a single incremental-crawl switch,
	``incrementalSwitch``: 1 enables it, 0 disables it.

	:return: True when the file was created, False when it already existed
	"""
	if os.path.exists('./SETTINGS.json'):
		return False
	print('>>>Make Configuration:./SETTINGS.json...')
	defaults = {"incrementalSwitch": 1}
	with open('./SETTINGS.json', 'w', encoding='utf-8') as f:
		json.dump(defaults, f, ensure_ascii=False, indent=4)
	return True


def configurate_file(word: str = None) -> None:
	"""
	Create the per-keyword directory structure and bookkeeping files.

	Builds ``./ImageDirs/<word>/Images`` plus the URL-queue JSON and the
	LOG.txt dedup log. When LOG.txt already exists and the incremental
	switch in ./SETTINGS.json is 0, the log is truncated so previously
	downloaded images are fetched again.

	:param word: keyword naming the directory; defaults to the
		module-level ``select_word`` set by the main loop, which keeps
		existing zero-argument callers working
	:return: None
	"""
	if word is None:
		# NOTE(review): the original silently relied on the global
		# ``select_word``; the optional parameter makes that explicit.
		word = select_word

	if not os.path.exists(f'./ImageDirs/{word}/Images'):
		os.makedirs(f'./ImageDirs/{word}/Images')  # keyword folder / image folder
		print(f'>>>Make Directory:./ImageDirs/{word}/Images...')

	if not os.path.exists(f'./ImageDirs/{word}/{word}.json'):
		# URL queue, starts as an empty JSON object.
		with open(f'./ImageDirs/{word}/{word}.json', 'w', encoding='utf-8') as f:
			json.dump({}, f, ensure_ascii=False, indent=4)
		print(f'>>>Make Configuration:./ImageDirs/{word}/{word}.json...')

	if not os.path.exists(f'./ImageDirs/{word}/LOG.txt'):
		with open(f'./ImageDirs/{word}/LOG.txt', 'w', encoding='utf-8') as f:
			f.write('')  # dedup log file
		print(f'>>>Make LOG File:./ImageDirs/{word}/LOG.txt...')
	else:
		with open('./SETTINGS.json', 'r', encoding='utf-8') as f:
			settings = json.load(f)
		if settings['incrementalSwitch'] == 0:
			# Non-incremental mode: wipe the log so old images are re-fetched.
			with open(f'./ImageDirs/{word}/LOG.txt', 'w', encoding='utf-8') as f:
				f.write('')
				os.fsync(f.fileno())  # force the truncation to disk


def read_log(file_path: str) -> list:
	"""
	Load a log file and return its lines as a list.

	:param file_path: path of the log file
	:return: list of log entries, one per line, whitespace-stripped
	"""
	with open(file_path, 'r', encoding='utf-8') as f:
		return [line.strip() for line in f]


if __name__ == '__main__':
	# Interactive entry point: one crawl per loop iteration.
	header = get_headers()
	make_setting_file()

	while True:
		select_word = input('>>>Please input keyword(`0` TO EXIT): ')
		if select_word == '0':
			break
		image_num = input('>>>Please input image number(`0` TO EXIT): ')
		if image_num == '0':
			break
		configurate_file()
		time.sleep(t100 * 5)
		# Build the per-keyword directory structure
		if image_num.isdigit() and int(image_num) > 0:
			if make_download_file(int(image_num), header, select_word):  # collect the URL queue (JSON)
				download_file(select_word, f'./ImageDirs/{select_word}/{select_word}00x00.json')
				# Download the images listed in the temporary URL-queue file

				os.remove(os.path.join(f'./ImageDirs/{select_word}/{select_word}00x00.json'))
				# Remove the temporary queue file once the download finishes

				a = input('>>>Image download successfully.EXIT?(N TO CONTINUE).')
				if a == 'N' or a == 'n':
					continue
				else:
					break
			else:
				a = input('>>>No Data.RETRY?(Y TO CONTINUE).')
				if a == 'Y' or a == 'y':
					continue
				else:
					break
		else:
			print('>>>Invalid input. Please retry.')
			time.sleep(t100 * 8)
