import requests
from pyquery import PyQuery as pq
from bs4 import BeautifulSoup
from lxml import etree
import re
import json
import time



def getPage(url):
	"""Fetch *url* and return the response body as text, or None on failure.

	Sends a desktop-Chrome User-Agent so Douban does not reject the
	request as a bot.  Any non-200 status or request error yields None.
	"""
	try:
		# Disguise as a regular browser.
		headers = {"User-Agent":'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}
		# BUGFIX: without a timeout, a stalled connection hangs the script forever.
		res = requests.get(url, headers=headers, timeout=10)
		if res.status_code == 200:
			return res.text
		else:
			return None
	except requests.RequestException as e:
		# Narrowed from bare Exception: only network/HTTP errors are expected here.
		print(e)
		return None
	

def parsePage(html):
	"""Parse one Douban Top-250 listing page and yield one dict per book.

	Each dict has keys: index (running counter), title, img (cover image
	URL), info (publisher/author line), score (rating text).

	Relies on the module-level counter ``num`` to keep a running index
	across multiple pages.
	"""
	global num
	# NOTE: earlier PyQuery / BeautifulSoup implementations of this parser
	# were removed; only the XPath version is kept.
	doc = etree.HTML(html)
	items = doc.xpath('//tr[@class="item"]')
	for item in items:
		yield({
			'index': num,
			'title': item.xpath('.//div[@class="pl2"]/a/@title')[0],
			# BUGFIX: previously extracted the <a>'s @href (the book-detail
			# link); the cover image URL lives on the nested <img>'s @src.
			'img': item.xpath('.//a[@class="nbg"]/img/@src')[0],
			'info': item.xpath('.//p[@class="pl"]/text()')[0],
			'score': item.xpath('.//div[@class="star clearfix"]/span[@class="rating_nums"]/text()')[0]
		})
		num += 1



def writeFile(content):
	"""Append *content* to Xpath_top250_book.txt as one JSON line."""
	# Files only accept strings, so serialize the dict with JSON first;
	# ensure_ascii=False keeps Chinese characters readable in the file.
	line = json.dumps(content, ensure_ascii=False)
	with open('./Xpath_top250_book.txt', 'a', encoding='utf-8') as out:
		out.write(line + '\n')
	

def main(page):
	"""Scrape one listing page at offset *page* and persist every book found.

	*page* is the ``start=`` offset of the Douban Top-250 listing
	(0, 25, 50, ...).
	"""
	url = 'https://book.douban.com/top250?start=' + str(page)
	html = getPage(url)
	# BUGFIX: getPage returns None on any request failure; previously that
	# None flowed into parsePage and crashed inside etree.HTML(None).
	if html is None:
		return
	content = parsePage(html)
	for i in content:
		writeFile(i)


if __name__ == '__main__':
	# Running book index, shared with parsePage via `global num`.
	num = 1
	# Douban pages books 25 per listing page: start offsets 0, 25, ..., 225.
	for start in range(0, 250, 25):
		main(start)
		time.sleep(1)  # throttle requests to be polite to the server