from lxml import etree
from bs4 import BeautifulSoup as bs
from pyquery import PyQuery
import requests
import os,time,openpyxl

# Desktop-browser User-Agent header; sent with every request so Douban
# does not reject the crawler as an unidentified bot.
headers={
	'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5478.400 QQBrowser/10.1.1550.400',
}

def getpage(url, page):
	'''Fetch the HTML of one Top-250 listing page.

	url  -- full URL of the listing page to download
	page -- zero-based item offset (a multiple of 25); used only for the
	        progress message (page number = page/25 + 1)
	Returns the response body text on HTTP 200, otherwise None.
	'''
	try:
		print("正在爬取豆瓣图书TOP250榜的所有图书信息（当前第"+str(int(page/25+1))+"页）...")
		# A timeout keeps the crawler from hanging forever on a stalled
		# connection (the original call had none).
		res = requests.get(url, headers=headers, timeout=10)
		if res.status_code == 200:
			return res.text
		print("网页爬取失败...")
		return None
	except Exception as err:
		print("网页爬取失败，原因是："+str(err))
		return None

def parsepage(content, page):
	'''Parse one listing page and yield one dict per book.

	content -- raw HTML text of the listing page
	page    -- zero-based offset of the first book on this page, so that
	           'index' becomes the overall 1-based rank
	Yields dicts with keys: index, title, image, author, pc (publisher),
	price, score.
	'''
	print("解析网页内容中...")

	# Parse with BeautifulSoup; each book is one <tr class="item"> row.
	soup = bs(content, 'lxml')
	items = soup.find_all(name="tr", attrs={"class": "item"})
	for index, item in enumerate(items, start=1):
		# The <p class="pl"> line looks like
		# "author / [translator /] publisher / date / price".
		# Split it once and reuse, instead of re-fetching and re-splitting
		# the same node for every field as before.
		info = item.find(name="p", attrs={'class': 'pl'}).string.split(" / ")
		# Four fields means no translator, so the publisher is at index 1;
		# otherwise it is assumed to sit at index 2 (translator present).
		pc = 1 if len(info) == 4 else 2
		yield {
			'index': str(page + index),
			'title': item.find(name="div", attrs={'class': 'pl2'}).a.attrs['title'],
			'image': item.find(name="td", attrs={'width': '100', 'valign': 'top'}).a.img.attrs['src'],
			'author': info[0],
			'pc': info[pc],
			# [-1] reads the last element without mutating the list
			# (the original used .pop() on a throwaway copy).
			'price': info[-1],
			'score': item.find(name="span", attrs={'class': 'rating_nums'}).string,
		}

def savefile(content, page, wb):
	'''Save one book's cover image to disk and append its info row to the workbook.

	content -- dict produced by parsepage (index/title/image/author/pc/price/score)
	page    -- overall 1-based rank, used in progress messages
	wb      -- openpyxl Workbook; the caller is responsible for the final wb.save()
	'''
	print("已提取图书信息，正在保存为文件...")

	# Characters that are illegal in Windows file names.
	illegal = ['|', ':', '*', '?', '/', '\\', '<', '>', '"']
	# Make sure the image folder exists.
	if not os.path.exists('./豆瓣图书top250榜图片'):
		os.makedirs('./豆瓣图书top250榜图片')
	try:
		res = requests.get(content['image'], headers=headers)
		# Strip illegal characters so the title is usable as a file name.
		title = content['title']
		for ch in illegal:
			title = title.replace(ch, '')
		no = content['index']
		with open('./豆瓣图书top250榜图片' + "/" + no + ' - ' + title + ".jpg", "wb") as file:
			print("正在保存第" + str(page) + "张图片...")
			file.write(res.content)

		# Append one row to the active sheet, normalizing the price to end in 元.
		ws = wb.active
		if '元' not in content['price']:
			content['price'] = content['price'] + '元'
		row = (content['index'], content['title'], content['image'],
		       content['author'], content['pc'], content['price'], content['score'])
		print('正在保存第' + str(page) + '条信息...')
		ws.append(row)
		# NOTE: the original `wb.save` (no parentheses) was a silent no-op
		# attribute access; the workbook is actually saved once by main()
		# after the whole crawl finishes, so no per-row save is needed here.

	except Exception as err:
		print("保存失败，原因是："+str(err))

def main():
	'''Crawl all ten pages of the Douban book Top-250 list and save the results.'''
	# Prepare the output workbook with a header row.
	wb = openpyxl.Workbook()
	sheet = wb.active
	sheet.append(["排名","书名","图片链接","作者","出版社","定价","评分"])

	# Each listing page holds 25 books, so the start offsets are 0, 25, ..., 225.
	for start in range(0, 226, 25):
		html = getpage("https://book.douban.com/top250?start=" + str(start), start)
		if html:
			# offset counts books within the page (1..25); start+offset is
			# the overall rank passed to savefile.
			for offset, book in enumerate(parsepage(html, start), start=1):
				savefile(book, start + offset, wb)
		# Be polite: pause one second between pages.
		time.sleep(1)

	wb.save('./豆瓣图书top250榜.xlsx')
	print("爬取完毕，所有信息及图片已成功保存")


'''运行主程序'''
# Script entry point: run the crawler only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
	main()