from requests.exceptions import RequestException
from lxml import etree
import re,time,json
import requests

def getPage(url):
	"""Fetch *url* and return the response body text, or None on failure.

	Returns None both for non-200 responses and for any network-level
	error (connection, DNS, timeout).
	"""
	headers = {
		# Header VALUE only — the old value wrongly began with the literal
		# text "User-Agent: ", producing a malformed header on the wire.
		'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
	}
	try:
		# Bound the request so a stalled server cannot hang the crawler.
		res = requests.get(url, headers=headers, timeout=10)
	except RequestException:
		return None
	if res.status_code == 200:
		return res.text
	return None

def parsePage(content):
	"""Parse one Douban Top-250 HTML page and yield a dict per book.

	Each dict has keys 'title', 'info', 'score', 'pic'. A field missing
	from the markup yields an empty string instead of crashing the parse.
	"""
	# Build an element tree from the raw HTML string.
	html = etree.HTML(content)
	# Each book entry lives in its own full-width table.
	items = html.xpath('//table[@width="100%"]')

	def _first(node, path, default=''):
		# xpath() returns a list; unconditional [0] used to raise
		# IndexError on entries missing a field — fall back instead.
		found = node.xpath(path)
		return found[0] if found else default

	for item in items:
		title = _first(item, ".//div[@class='pl2']/a/@title")
		# Some entries carry an extra subtitle span; append it when present.
		if item.xpath(".//div[@class='pl2']/a/span"):
			title += _first(item, ".//div[@class='pl2']/a/span/text()")

		yield {
			'title': title,
			'info': _first(item, ".//p[@class='pl']/text()"),
			'score': _first(item, ".//div[@class='star clearfix']/span[@class='rating_nums']/text()"),
			'pic': _first(item, ".//img[@width='90']/@src"),
		}




def writeFile(content):
	"""Append *content* to ./result1.text as one JSON line (UTF-8)."""
	with open("./result1.text", 'a', encoding='utf-8') as out:
		# print adds the trailing newline, one record per line.
		print(json.dumps(content, ensure_ascii=False), file=out)

def main(offset):
	"""Fetch the Top-250 page at *offset* and persist each parsed record."""
	page = getPage('https://book.douban.com/top250?start=' + str(offset))
	if not page:
		return
	for record in parsePage(page):
		writeFile(record)

if __name__ == '__main__':
	# Crawl all ten pages (25 books each), pausing between requests
	# to stay polite to the server.
	for offset in range(0, 250, 25):
		main(offset=offset)
		time.sleep(1)
