import requests
import re
import io
import sys
# Re-wrap stdout as UTF-8 so the Chinese output prints correctly even when
# the console's default encoding is not UTF-8 (e.g. GBK on Windows).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')
from bs4 import BeautifulSoup


def getHTMLText(url):
	"""Fetch *url* and return the decoded page text, or "" on any failure.

	Sends a spoofed browser User-Agent with a 30 s timeout; the response
	encoding is set from the detected (apparent) encoding so Chinese
	pages decode correctly.
	"""
	try:
		# Fix: the original misspelled 'Mozilla' as 'Mozila', weakening the spoof.
		kv = {'user-agent': 'Mozilla/5.0'}
		r = requests.get(url, headers=kv, timeout=30)
		r.raise_for_status()  # raise HTTPError for 4xx/5xx status codes
		r.encoding = r.apparent_encoding
		return r.text
	except Exception as e:
		# Report the actual error instead of discarding it; return ""
		# so the caller can continue with the remaining pages.
		print("访问URL失败", e)
		return ""

def parsePage(ilt, html):
	"""Parse JD search-result *html*, appending [price, title] pairs to *ilt*.

	re.S makes '.' also match newlines, so each multi-line <div> block is
	captured as a single chunk; the chunks are then parsed with BeautifulSoup.
	Returns *ilt* (the list is also mutated in place).
	"""
	try:
		plt = re.findall(r'<div class=\"p-price\">(.*?)</div>', html, re.S | re.M)
		tlt = re.findall(r'<div class=\"p-name p-name-type-2\">(.*?)</div>', html, re.S | re.M)
		# zip iterates prices/titles in lockstep and also guards against the
		# two lists differing in length (the original indexed tlt by len(plt)
		# and could raise IndexError).
		for price_html, title_html in zip(plt, tlt):
			price = BeautifulSoup(price_html, 'html.parser').i.string
			if price is None:
				# Some items carry the price in a data attribute instead of <i> text.
				price = BeautifulSoup(price_html, 'html.parser').strong.attrs['data-price']
			title = BeautifulSoup(title_html, 'html.parser').a.attrs['title']
			ilt.append([price, title])
		return ilt
	except (AttributeError, KeyError, TypeError) as e:
		# Unexpected markup: report instead of silently swallowing (the
		# original's bare `except: return ""` hid errors AND returned the
		# wrong type). Return the possibly-partial list so the return type
		# is stable for callers.
		print("解析页面失败", e)
		return ilt


def printGoodsList(ilt):
	"""Pretty-print the [price, title] pairs in *ilt* as a numbered table.

	Rows whose price is None (a parse failure upstream) are skipped.
	"""
	tplt = "{:^4}\t{:^10}\t{:20}"
	print(tplt.format("序号", "价格", "商品名称"))
	for i, (price, title) in enumerate(ilt):
		# `is not None` is the idiomatic identity check; the original's
		# `not ... == None` invokes __eq__ and reads awkwardly.
		if price is not None:
			print(tplt.format(i, price, title))

def main():
	"""Crawl `depth` result pages of a JD search for `goods` and print them."""
	goods = '书包'
	depth = 5
	# Fix: `goods` was defined but never used — the search keyword was
	# hard-coded to 笔记本 in the URL. Build the URL from `goods` so the
	# variable actually controls what is searched (requests percent-encodes
	# the non-ASCII characters in the URL itself).
	start_url = ('https://search.jd.com/Search?keyword=' + goods
		+ '&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=' + goods
		+ '&s=110&click=0&page=')
	infoList = []
	for page in range(depth):
		try:
			url = start_url + str(page)
			html = getHTMLText(url)
			parsePage(infoList, html)
		except Exception as e:
			# One bad page should not abort the whole crawl.
			print(e)
			continue
	printGoodsList(infoList)

# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
	main()


							