# -*- coding: utf-8 -*-
# coding=utf-8
# ===============================
# Copyright (c) 2024 FlashVolador | 2578713815@qq.com 
# Licensed under the MIT License.
# ===============================
"""
爬虫应用--抓取公网图片;

程序概述：
采用控制字符界面
爬取百度网站上的相关主题图片, 并能按照关键词检索图片并下载到本地保存;

开发环境:
解释器: python3.8.10
操作系统: Windows11
IDE: pycharm professional 2024.1;

版本号2.0.0.0;

程序作者: 2578713815@qq.com  @FlashVolador;

:ps: 没有使用bs4库, 因为抓包发现服务器返回的是json格式的数据, 而bs4库仅支持lxml和html格式

:ps: 爬虫速度做了一些限制,请求速度不会太快(因为我没有IP代理池,担心会触发反爬机制)

:ps: 返回的数据可能有'gzip, deflate, br, zstd'四种编码格式，后两种格式需要额外安装库brotli、zstandard
:ps: 虽然没有import, 但是requests.get可能会在合适的时候自动调用brotli、zstandard库,所以编译的时候记得把这两个库带上
"""
import re
import random
import sys
import time
import requests
import json
import os
from anti_useragent import UserAgent  # 用于获取可用的随机UA
import chardet  # chardet.detect(b'...') 检测数据编码格式(返回一个字典)


urls = {
	'index': 'https://images.baidu.com/',
	'search': 'https://images.baidu.com/search/acjson',
}
# URL pool: the index page (used to obtain session cookies) and the
# lazy-load search endpoint that returns JSON batches of image links

t1 = 0.001
t10 = 0.01
t100 = 0.1
t1000 = 1
# speed-throttling constants (seconds); multiplied at call sites to pace requests


def get_headers() -> dict:
	"""
	Build a browser-like request header, to be reused for one crawl session.

	Requests the Baidu Images index page up to 5 times to obtain session
	cookies and merge them into the header; when the server keeps refusing,
	falls back to the cookie-less header.  On a network error the user is
	prompted and the program exits.
	:return: request header dict
	"""
	# Build the header once so the same User-Agent is kept across retries
	# (the contract is "keep this header for the whole crawl session";
	# the original regenerated a fresh UA on every retry).
	headers = {
		'User-Agent': UserAgent().random,
		'Accept-Encoding': 'gzip, deflate, br, zstd',
		'Accept-Language': 'zh-CN,zh;q=0.9,en-GB;q=0.8,en;q=0.7,en-US;q=0.6',
		'Connection': 'keep-alive',
		'Host': 'images.baidu.com',
		'Referer': 'https://images.baidu.com/',
	}
	for _ in range(5):
		try:
			response = requests.get(urls['index'], headers=headers, timeout=32)
		except Exception:
			# Treat any request failure as "no network" and bail out.
			input('>>>Please check the network.ENTER TO EXIT.')
			sys.exit(0)
		if response.status_code == 200:
			# Server answered: attach the returned cookies to the header.
			cookies_str = ';'.join(f'{cookie.name}={cookie.value}' for cookie in response.cookies)
			headers.update({'Cookie': cookies_str})
			return headers
	print('>>>Cookie request failed: The default header will be enabled.')
	return headers


def get_params(word: str, n: int) -> dict:
	"""
	Build the query parameters for one lazy-load batch of search results.

	The search page loads its data incrementally, 30 images per batch, so
	the integer `n` selects which batch to fetch (it acts like a page number).
	:param word: the search keyword
	:param n: batch index controlling the load offset (like a page number)
	:return: params dict for requests.get
	"""
	params = {
		"tn": "resultjson_com",
		"ipn": "rj",
		"ct": "201326592",
		"fp": "result",
		"word": word,
		"queryWord": word,
		"cl": "2",
		"lm": "-1",
		"ie": "utf-8",
		"oe": "utf-8",
		"st": "-1",
		"ic": "0",
		"face": "0",
		"istype": "2",
		"nc": "1",
		# BUG FIX: f"{30}*{n}" produced the literal text "30*n" (e.g. "30*2"),
		# not the numeric record offset the `pn` parameter expects.
		"pn": str(30 * n),
		"rn": "30",
	}
	return params


def get_html(url: str, headers: dict, params: dict) -> 'requests.Response | bool':
	"""
	Send a GET request and return the Response object on success.

	NOTE: the original annotation `requests.Response or bool` evaluated at
	def time to just `requests.Response`; a string union states the real
	contract without requiring Python 3.10 syntax.
	:param url: target URL
	:param headers: request header
	:param params: query-string parameters
	:return: the requests.Response on HTTP 200, otherwise False
	"""
	response = requests.get(url, headers=headers, params=params, timeout=280)
	# Any non-200 status is collapsed to False so callers can truth-test it.
	if response.status_code == 200:
		return response
	return False


def write_json(json_path: str, json_data: dict) -> None:
	"""
	Merge `json_data` into the JSON file at `json_path`.

	Existing keys with the same name are overwritten; the file is rewritten
	pretty-printed with non-ASCII characters preserved.  A missing file is
	treated as an empty mapping instead of crashing (robustness fix).
	:param json_path: path of the JSON file holding the URL queue
	:param json_data: dict of image-resource URLs to merge in
	:return: None
	"""
	print('>>>Saving json file...')
	try:
		with open(json_path, 'r', encoding='utf-8') as f:
			existing_data = json.load(f)
	except FileNotFoundError:
		existing_data = {}  # first run for this keyword: start empty
	existing_data.update(json_data)
	with open(json_path, 'w', encoding='utf-8') as f:
		json.dump(existing_data, f, ensure_ascii=False, indent=4)


def make_download_file(image_num: int, headers: dict, select_word: str) -> bool:
	"""
	Build the URL queue files for one crawl.

	Pages through the lazy-loaded search results until `image_num` new
	links (serial numbers not yet in LOG.txt) have been collected, then
	appends them to the persistent URL queue, writes a temporary download
	queue, and records the new serial numbers in the log.
	:param image_num: number of images the user asked for
	:param headers: request header (from get_headers)
	:param select_word: the search keyword
	:return: True when the queue files were written, False on server refusal
	"""
	log_info_list = read_log(f'./ImageDirs/{select_word}/LOG.txt')
	num = 0
	links = {}
	new_log_list = []
	for i in range(1, 9999):
		if html := get_html(urls['search'], headers=headers, params=get_params(word=select_word, n=i)):
			print(f'>>>[{html.status_code} OK]Retrieving image link...')

			result = chardet.detect(html.content)  # detect the payload encoding
			html_text = html.content.decode(str(result['encoding']), 'ignore')  # 'ignore' drops undecodable bytes
			content_without_emoji = re.sub(r'[\U0001F600-\U0001F64F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F1E0-\U0001F1FF]', '', html_text)
			# strip emoji so json.loads does not choke on them

			# parse the JSON payload into a Python dict
			if data := json.loads(content_without_emoji, strict=False)['data']:
				for link in data:
					if num >= image_num:
						write_json(f'./ImageDirs/{select_word}/{select_word}.json', links)
						# append this batch to the persistent URL queue
						with open(f'./ImageDirs/{select_word}/{select_word}00x00.json', 'w', encoding='utf-8') as f:
							f.write(json.dumps(links, ensure_ascii=False, indent=4))
						# temporary file holding the URLs to download now
						print('>>>Saving log info...')
						for info in new_log_list:
							make_log(info)
						print('-' * 50)
						return True
					try:
						serial = link['thumbURL'][28:38]  # 10-char URL slice used as a de-dup serial number
						if serial not in log_info_list:  # skip links already logged
							key = select_word + serial + '.JPEG'
							links.update({key: link['thumbURL']})
							# remember the serial for the new log entries
							new_log_list.append(serial)
							num += 1
							print(
								f">>>Retrieve resources link: {select_word}{serial}.JPEG" + ':' + f"{link['thumbURL']}")
							time.sleep(t100 * 2)
						else:
							print('>>>The link already exists.CONTINUE...')
					except Exception:
						# entries without a usable thumbURL are skipped
						print('>>>Unknown error.CONTINUE...')
			else:
				print('>>>No data was retrieved this time.CONTINUE...')
				time.sleep(t100 * 2)
		else:
			# BUG FIX: this branch means get_html returned False, which has
			# no status_code attribute — the old message raised AttributeError.
			input('>>>The server denies access!')
			return False


def make_log(info: str = '') -> None:
	"""
	Append one line of log info to the keyword's LOG.txt file.

	NOTE(review): relies on the module-level global `select_word` (set in
	the __main__ block) instead of taking the keyword as a parameter —
	confirm it is always defined before this is called.
	:param info: log line to persist (a thumbURL serial number)
	:return: None
	"""
	with open(f'./ImageDirs/{select_word}/LOG.txt', 'a', encoding='utf-8') as f:
		f.write(info + '\n')


def download_file(select_word: str, file_path: str) -> bool:
	"""
	Download every image listed in a URL queue file.
	:param select_word: the search keyword, used to build the target directory
	:param file_path: path of the URL queue file (JSON: filename -> URL)
	:return: True once the whole queue has been processed
	"""
	with open(file_path, 'r', encoding='utf-8') as f:
		url_data = json.load(f)  # the URL queue dict
	for name in url_data:
		# A minimal fresh header per request: reusing the full cookie
		# header from get_headers() makes the image servers answer 403.
		headers = {
			'User-Agent': UserAgent().random,
		}
		url = url_data[name]
		response = get_html(url, headers, params={})
		# BUG FIX: get_html returns False on a non-200 status, and False has
		# no status_code attribute — both the old success test and the old
		# error print crashed with AttributeError.  Truth-test the response.
		if response:
			with open(f'./ImageDirs/{select_word}/Images/{name}', 'wb') as f:
				f.write(response.content)
			print(f'Downloading...: {name}--{url}')
			# random pause against request-frequency-based anti-crawling
			time.sleep(t100 * random.randrange(1, 5))
		else:
			print('>>>Error: download request failed!')
	return True


def make_setting_file() -> bool:
	"""
	Create ./SETTINGS.json with the default configuration if it is missing.

	The file holds one flag, `incrementalSwitch`: 1 enables incremental
	crawling (previously downloaded images are skipped), 0 disables it.
	:return: True when a new settings file was written, False if one existed
	"""
	settings_path = './SETTINGS.json'
	if os.path.exists(settings_path):
		return False
	print(f'>>>Make Configuration:./SETTINGS.json...')
	default_settings = {"incrementalSwitch": 1}
	with open(settings_path, 'w', encoding='utf-8') as f:
		json.dump(default_settings, f, ensure_ascii=False, indent=4)
	return True


def configurate_file() -> None:
	"""
	Build the directory layout for the current keyword.

	Creates ./ImageDirs/<keyword>/Images, the URL-queue .json file and the
	LOG.txt de-duplication log when missing.  When LOG.txt already exists,
	it is truncated only if `incrementalSwitch` in ./SETTINGS.json is 0.

	NOTE(review): reads the module-level global `select_word` (set in the
	__main__ block) and assumes ./SETTINGS.json exists — confirm that
	make_setting_file() always runs first.
	:return: None
	"""
	if not os.path.exists(f'./ImageDirs/{select_word}/Images'):
		os.makedirs(f'./ImageDirs/{select_word}/Images')  # keyword dir / image storage dir
		print(f'>>>Make Directory:./ImageDirs/{select_word}/Images...')

	if not os.path.exists(f'./ImageDirs/{select_word}/{select_word}.json'):
		with open(f'./ImageDirs/{select_word}/{select_word}.json', 'w', encoding='utf-8') as f:
			json.dump({}, f, ensure_ascii=False, indent=4)
		# the persistent URL queue, starts as an empty JSON object
		print(f'>>>Make Configuration:./ImageDirs/{select_word}/{select_word}.json...')

	if not os.path.exists(f'./ImageDirs/{select_word}/LOG.txt'):
		with open(f'./ImageDirs/{select_word}/LOG.txt', 'w', encoding='utf-8') as f:
			f.write('')  # log file recording downloaded serial numbers
		print(f'>>>Make LOG File:./ImageDirs/{select_word}/LOG.txt...')
	else:
		with open('./SETTINGS.json', 'r', encoding='utf-8')as f:
			settings = json.load(f)
		incremental_switch = settings['incrementalSwitch']
		if incremental_switch == 0:
			# non-incremental mode: wipe the log so everything re-downloads
			with open(f'./ImageDirs/{select_word}/LOG.txt', 'w', encoding='utf-8') as f:
				f.write('')
				os.fsync(f.fileno())
		else:
			pass


def read_log(file_path: str) -> list:
	"""
	Read a log file and return its lines as a list.
	:param file_path: path of the log file
	:return: list of the file's lines with surrounding whitespace stripped
	"""
	with open(file_path, 'r', encoding='utf-8') as f:
		return [line.strip() for line in f]


if __name__ == '__main__':
	# Interactive entry point: ask for a keyword and an image count, build
	# the URL queue, then download into ./ImageDirs/<keyword>/Images.
	header = get_headers()  # fetched once, reused for every search request
	make_setting_file()

	while True:
		select_word = input('>>>Please input keyword(`0` TO EXIT): ')
		if select_word == '0':
			sys.exit(0)
		image_num = input('>>>Please input image number(`0` TO EXIT): ')
		if image_num == '0':
			sys.exit(0)
		configurate_file()
		time.sleep(t100 * 5)
		# build the directory structure for this keyword
		if image_num.isdigit() and int(image_num) > 0:
			if make_download_file(int(image_num), header, select_word):  # build the URL queue (JSON)
				download_file(select_word, f'./ImageDirs/{select_word}/{select_word}00x00.json')
				# download the images listed in the temporary URL queue file

				os.remove(os.path.join(f'./ImageDirs/{select_word}/{select_word}00x00.json'))
				# delete the temporary file once downloading is done

				a = input('>>>Image download successfully.EXIT?(N TO CONTINUE).')
				if a == 'N' or a == 'n':
					continue
				else:
					sys.exit(0)
			else:
				a = input('>>>No Data.RETRY?(Y TO CONTINUE).')
				if a == 'Y' or a == 'y':
					continue
				else:
					sys.exit(0)
		else:
			print('>>>Invalid input. Please retry.')
			time.sleep(t100 * 8)
