import requests
from requests.exceptions import RequestException
from lxml import etree
from bs4 import BeautifulSoup
import json
from pyquery import PyQuery 
import time

def getPage(url):
	"""Fetch *url* and return the response body as text.

	Returns None on any network error or on a non-200 status code,
	so callers can simply truth-test the result.
	"""
	# Browser-like User-Agent so the site does not reject the crawler
	headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36"}
	try:
		# timeout added: without it a stalled connection would hang the crawl forever
		res = requests.get(url, headers=headers, timeout=10)
		if res.status_code == 200:
			return res.text
		return None
	except RequestException:
		# swallow network errors deliberately; caller treats None as "skip page"
		return None



def parsePage(content):
	"""Parse one Douban Top-250 listing page and yield a dict per book.

	Each yielded dict has the keys: name, detail, image, quote, score.
	Missing elements yield an empty string (pyquery's .text()/.attr()
	behavior), never raise.
	"""
	# NOTE: earlier commented-out lxml/XPath and BeautifulSoup variants
	# of this parser were removed as dead code; this pyquery version is
	# the one in use.
	doc = PyQuery(content)
	# every book row is a <tr class="item">
	for item in doc("tr.item").items():
		yield {
			'name': item.find('div.pl2 a').text(),
			'detail': item.find('p.pl').text(),
			'image': item.find('div.pl2 img').attr('src'),  # attr() method, not dict-style access
			'quote': item.find('p.quote span.inq').text(),
			'score': item.find('span.rating_nums').text(),
		}


def writeFile(content):
	"""Append *content* to ./result.text as one JSON line (UTF-8, CJK kept readable)."""
	# ensure_ascii=False keeps Chinese text human-readable instead of \uXXXX escapes
	record = json.dumps(content, ensure_ascii=False)
	with open("./result.text", 'a', encoding="utf-8") as f:
		f.write(record + "\n")



def main(offset):
	"""Crawl one listing page at *offset*, then print and persist every parsed record."""
	page_url = f"https://book.douban.com/top250?start={offset}"
	html = getPage(page_url)
	# getPage returns None on failure; skip the page silently in that case
	if not html:
		return
	for record in parsePage(html):
		print(record)
		writeFile(record)


if __name__ == "__main__":
	# Top 250 = 10 pages of 25 books; offsets 0, 25, ..., 225
	for start in range(0, 250, 25):
		main(offset=start)
		# be polite: pause one second between page fetches
		time.sleep(1)