from requests.exceptions import RequestException
from lxml import etree
from bs4 import BeautifulSoup
import re,time,json
from pyquery import PyQuery as pq
import requests

def getPage(url):
	"""Fetch *url* and return the response body as text.

	Returns None when the request raises a RequestException or the
	server answers with a non-200 status code.
	"""
	headers = {
		# The value must NOT repeat the header name — the original sent
		# "User-Agent: User-Agent: Mozilla/...", a malformed UA string.
		'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
	}
	try:
		# timeout keeps the crawler from hanging forever on a stalled connection
		res = requests.get(url, headers=headers, timeout=10)
		# only a clean 200 counts as success
		if res.status_code == 200:
			return res.text
		return None
	except RequestException:
		return None

def parsePage(content):
	"""Parse one Douban Top-250 HTML page with BeautifulSoup.

	Yields one dict per book row with keys 'title', 'info', 'score'
	and 'pic'. A row missing its title link is skipped; other missing
	fields fall back to ""/None instead of raising — the original
	indexed select(...)[0] directly, so one malformed row killed the
	whole generator with an IndexError.
	"""
	soup = BeautifulSoup(content, "lxml")
	# each book is rendered as one <tr class="item"> row
	for item in soup.find_all(name="tr", attrs={"class": "item"}):
		title_tag = item.select_one("div.pl2 a")
		if title_tag is None:
			# malformed row — skip it rather than crash the generator
			continue
		info_tag = item.select_one("p.pl")
		score_tag = item.select_one("span.rating_nums")
		# NOTE(review): matches only <img alt=""> as the original did —
		# confirm against the live markup that cover images have an empty alt
		pic_tag = item.find(name="img", attrs={"alt": ""})
		yield {
			# same cleanup as before: strip newlines and double spaces
			'title': str(title_tag.get_text()).replace("\n", "").replace("  ", ""),
			'info': info_tag.get_text() if info_tag is not None else "",
			'score': score_tag.string if score_tag is not None else None,
			'pic': pic_tag.attrs['src'] if pic_tag is not None else None,
		}
	
	
def writeFile(content):
	"""Append *content* to ./result3.text as a single JSON line (UTF-8)."""
	# serialize first, then write record + newline in one call
	record = json.dumps(content, ensure_ascii=False)
	with open("./result3.text", 'a', encoding='utf-8') as out:
		out.write(record + "\n")

def main(offset):
	"""Crawl one result page starting at *offset* and persist every book."""
	page = getPage('https://book.douban.com/top250?start=' + str(offset))
	if not page:
		return
	for book in parsePage(page):
		writeFile(book)
			#print(item)

if __name__ == '__main__':
	# 10 pages x 25 books each = Top 250; pause 1s between requests
	# to stay polite to the server (offsets 0, 25, ..., 225)
	for offset in range(0, 250, 25):
		main(offset=offset)
		time.sleep(1)
