import hashlib
import random
from urllib.parse import quote

import requests
from bs4 import BeautifulSoup
from lxml import etree

# Pool of desktop browser User-Agent strings; one is picked at random per
# header set so scraping traffic looks less uniform.
# Fixes: a missing comma after the "Chrome/19.0.1055.1" entry silently fused
# two user agents into one invalid string (implicit literal concatenation),
# and one entry carried an accidental trailing space.
USER_AGENT_LIST = [
	"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
	"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
	"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
	"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
	"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
	"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
	"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
	"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
	"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
	"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR "
	"2.0.50727; SE 2.X MetaSr 1.0)",
	"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
	"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
	"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
	"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
	"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
	"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
	"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
	"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4496.0 "
	"Safari/537.36",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 "
	"Safari/605.1.15",
	# NOTE(review): this entry duplicates the previous one in the original
	# source (it only differed by a trailing space); kept so the random
	# selection weights are unchanged.
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 "
	"Safari/605.1.15",
]

# Default request headers for the Sogou Weixin search pages.
# Fix: the original dict carried both "accept" and "Accept" keys with
# different values; HTTP header names are case-insensitive, so the
# duplicate lowercase key was redundant — only 'Accept' is kept.
headers = {
	'User-Agent': random.choice(USER_AGENT_LIST),
	'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
	'Accept-Encoding': 'gzip, deflate, br',
	'Accept-Language': 'zh-cn',
	'Referer': 'https://weixin.sogou.com/',
}

# Session cookie string captured from a logged-in Sogou Weixin visit
# (SNUID/SUID/SUV identify the session to the anti-bot layer).
_SOGOU_COOKIE = (
	"SNUID=D9135999A2A764A850602E4EA3FB3050; "
	"JSESSIONID=aaa5habIFTtIcfGJxV9Ox; "
	"weixinIndexVisited=1; "
	"ABTEST=0|1625125204|v1; "
	"IPLOC=CN4403; "
	"SUID=BB8D64717E1A910A0000000060AC3E5E; "
	"SUID=BB8D6471771A910A0000000060AC3E5D; "
	"SUV=002864AE71648DBB60AB2E494CCA6865"
)

# Alternative header set that carries the session cookies above.
headers1 = {
	"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
	"Cookie": _SOGOU_COOKIE,
	"User-Agent": random.choice(USER_AGENT_LIST),
	"Accept-Encoding": "gzip, deflate, br",
	"Accept-Language": "zh-cn",
	"Connection": "keep-alive",
}


def request_url(url, timeout=10, max_retries=3):
	"""GET *url* with the module-level scraping ``headers``.

	Bug fixes versus the original:
	* the single ``try`` used to wrap the WHOLE retry loop, so the first
	  network exception aborted every remaining attempt — now each attempt
	  has its own ``try`` and a failure moves on to the next retry;
	* ``requests.get`` had no ``timeout``, so a dead connection could hang
	  the scraper forever;
	* only ``requests.RequestException`` is caught instead of a blanket
	  ``Exception``.

	:param url: absolute URL to fetch.
	:param timeout: per-attempt timeout in seconds (new, defaulted — keeps
		the original one-argument call sites working).
	:param max_retries: number of attempts before giving up.
	:return: the last ``requests.Response`` obtained (truthy responses,
		i.e. status < 400, short-circuit the loop), or ``None`` if every
		attempt raised.
	"""
	resp = None
	for _attempt in range(max_retries):
		try:
			resp = requests.get(url, headers=headers, timeout=timeout)
		except requests.RequestException as e:
			print("页面请求失败" + str(e))
			continue
		if resp:  # Response truthiness: status code < 400
			break
	return resp


def get_data(kwd):
	"""Fetch the first two Sogou Weixin search result pages for *kwd* and
	feed each page to :func:`parse_with_lxml`.

	Bug fixes versus the original:
	* the search keyword is now percent-encoded with ``quote`` so non-ASCII
	  or reserved characters cannot corrupt the query string;
	* a failed fetch raises ``RuntimeError`` instead of using ``assert``,
	  which is silently stripped when Python runs with ``-O``.

	:param kwd: search keyword (may contain CJK / reserved characters).
	:raises RuntimeError: when a result page could not be fetched.
	"""
	for page in range(1, 3):
		url = (
			f"https://weixin.sogou.com/weixin"
			f"?query={quote(kwd)}&type=2&page={page}&ie=utf8"
		)
		response = request_url(url)
		if response is None:
			raise RuntimeError("请求失败")
		parse_with_lxml(response.text)


# parse_with_soup(response.text)


# 使用lxml进行解析
def parse_with_lxml(text):
	"""Parse a Sogou Weixin result page with lxml and print each result's
	title text nodes.

	Bug fixes versus the original:
	* ``etree.HTML`` returns ``None`` for empty/unparseable input — that is
	  now guarded instead of raising ``AttributeError`` on ``.xpath``;
	* the ``a_text`` account lookup was computed and thrown away, and a
	  dead ``pass`` ended the loop — both removed.

	:param text: HTML of one search result page.
	"""
	html = etree.HTML(text)
	if html is None:
		return
	for item in html.xpath('//div[@class="txt-box"]'):
		# h3/text() yields the title fragments around any <em> highlights
		title = item.xpath('.//h3/text()')
		print(title)


# 使用soup进行解析
def parse_with_soup(text):
	"""Parse a Sogou Weixin result page with BeautifulSoup and print each
	result's title, account name, account link, timestamp and summary.

	Bug fixes versus the original:
	* the ``<a class="account">`` tag was looked up twice — it is fetched
	  once and reused;
	* every sub-element is ``None``-guarded, so a result box missing a tag
	  is skipped instead of raising ``AttributeError``/``TypeError``.

	:param text: HTML of one search result page.
	"""
	soup = BeautifulSoup(text, "lxml")
	for item in soup.find_all('div', attrs={'class': 'txt-box'}):
		if item.h3 is not None:
			print(item.h3.text)
		account = item.find('a', attrs={'class': 'account'})
		if account is not None:
			print(account.text)
			print(account["href"])
		# the publish timestamp lives in the "t" attribute of div.s-p
		stamp = item.find('div', attrs={'class': 's-p'})
		if stamp is not None:
			print(stamp["t"])
		summary = item.find('p', attrs={'class': 'txt-info'})
		if summary is not None:
			print(summary.text)


if __name__ == '__main__':
	# Scraper entry point (disabled; demo below hashes a URL instead):
	# kwd = "python"
	# get_data(kwd)
	url = 'http://www.example.com/image.jpg'

	print(hashlib.algorithms_available)
	print('*' * 80)

	# SHA-1 of the URL — e.g. usable as a stable cache key / filename.
	print(hashlib.sha1(url.encode()).hexdigest())

	print('*' * 80)
	for hash_name in hashlib.algorithms_available:
		# shake_* digests need an explicit length argument for hexdigest(),
		# so skip them — and do so BEFORE constructing the hash object
		# (the original built the object first and threw it away).
		if hash_name.startswith('shake_'):
			continue
		h = hashlib.new(hash_name)
		h.update(url.encode('utf-8'))
		print(hash_name + " : " + h.hexdigest())
