import requests,openpyxl,time
from bs4 import BeautifulSoup

# Download a page and return its HTML text.
def open_url(url):
    """Fetch *url* and return the response body as text, or None on failure.

    Sends a desktop-browser User-Agent so Douban does not reject the
    request. A timeout keeps a stalled connection from hanging the crawl.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',
    }
    try:
        # timeout prevents an unresponsive server from blocking forever
        res = requests.get(url, headers=headers, timeout=10)
        if res.status_code == 200:
            return res.text
        return None
    except requests.RequestException:
        # narrow except: network errors/timeouts only — a bare `except:`
        # would also swallow KeyboardInterrupt and hide real bugs
        return None

# Parse one Top-250 listing page into book records.
def parse_url(res):
    """Yield [title, author, publisher, date, price, score] lists parsed
    from one Douban Top-250 page of HTML (*res*).

    Rows whose info string does not split into the expected 4 or 5
    fields are skipped. (The original code left author/shop/atime/price
    bound to the previous row's values in that case and yielded stale
    data — or raised NameError on the very first row.)
    """
    soup = BeautifulSoup(res, 'lxml')
    title_list = soup.select('div.pl2 a')
    msg_list = soup.select('p.pl')
    score_list = soup.select('span.rating_nums')
    for i in range(len(title_list)):
        try:
            title = title_list[i]['title']
            msg = msg_list[i].get_text().split('/')
            score = score_list[i].string
        except (IndexError, KeyError) as err:
            # selector lists can be shorter than title_list, and a link
            # may lack a title attribute — skip such rows
            print(err)
            continue
        if len(msg) == 5:
            # 5 fields means an extra (translator) column; drop it so the
            # remaining fields line up with the 4-field layout
            del msg[1]
        elif len(msg) != 4:
            # unexpected layout: skip rather than yield stale values
            continue
        author, shop, atime, price = msg
        yield [title.strip(), author.strip(), shop.strip(), atime.strip(), price.strip(), score]


# Append scraped rows to the Excel workbook on disk.
def save_file(data):
    """Append each row in *data* to the existing Top-250 workbook.

    The workbook created by the script entry point is reopened, the rows
    are appended to its default 'Sheet', and the file is saved again.
    """
    wb = openpyxl.load_workbook('./豆瓣图书TOP250.xlsx')
    ws = wb['Sheet']
    for i, row in enumerate(data, 1):
        try:
            print('正在保存第%d条数据...' % i)
            ws.append(row)
        except ValueError as err:
            # openpyxl raises ValueError for illegal cell values;
            # report and skip the bad row instead of aborting the save
            print(err)
    wb.save('./豆瓣图书TOP250.xlsx')

# Crawl one listing page and persist its records.
def main(url):
    """Download *url*, parse every book record on it, and save them."""
    res = open_url(url)
    # open_url returns None on any failure; without this guard,
    # BeautifulSoup(None, 'lxml') in parse_url would raise
    if res is None:
        return
    content = list(parse_url(res))
    save_file(content)
	
if __name__ == '__main__':
    # Create the output workbook with a header row before crawling.
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.append(['标题', '作者', '出版商', '创作时间', '价格', '评分'])
    workbook.save('./豆瓣图书TOP250.xlsx')
    # Crawl the first two listing pages (25 books per page), pausing
    # between requests to be polite to the server.
    for page_no in range(2):
        print('正在爬取第%d页信息' % (page_no + 1))
        main('https://book.douban.com/top250?start=%d' % (page_no * 25))
        time.sleep(1)
