# BeautifulSoup爬取豆瓣图书top250

import requests
from bs4 import BeautifulSoup


# 发出请求获得html源码
# Request a page and return its HTML source
def get_html(url):
    """Fetch *url* and return the response body as text.

    Raises requests.HTTPError on a non-2xx response and
    requests.Timeout if the server does not answer in time.
    """
    # Pretend to be a regular browser so the site does not block us
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
    # timeout prevents the script from hanging forever on a stalled connection
    resp = requests.get(url, headers=headers, timeout=10)
    # Fail loudly on HTTP errors instead of silently parsing an error page
    resp.raise_for_status()
    return resp.text


# 解析页面
# Parse every list page and write one record per book to the global file `f`
def html_parse():
    """Scrape all top-250 pages and append name/author/rating/summary records to f.

    The original version built four parallel find_all() lists and zip()ed
    them together; because some books have no summary (`span.inq` missing),
    that silently dropped books and paired summaries with the wrong titles.
    Here every field is looked up inside its own book's container instead.
    """
    for url in all_page():
        soup = BeautifulSoup(get_html(url), 'lxml')
        for title_div in soup.find_all('div', class_='pl2'):
            # NOTE(review): assumes each book sits in its own <table> wrapping
            # the pl2 div — confirm against the live page markup
            item = title_div.find_parent('table')
            name = title_div.find('a')['title']
            author_tag = item.find('p', class_='pl')
            author = author_tag.get_text() if author_tag else ''
            star_tag = item.find('span', class_='rating_nums')
            star = star_tag.get_text() if star_tag else ''
            # Some books have no one-line summary; emit an empty field instead
            # of misaligning the whole record (also avoids shadowing builtin `sum`)
            inq_tag = item.find('span', class_='inq')
            summary = inq_tag.get_text() if inq_tag else ''
            data = ('书名: ' + str(name) + '\n'
                    + '作者: ' + str(author) + '\n'
                    + '星级: ' + str(star) + '\n'
                    + '简介: ' + str(summary) + '\n')
            f.writelines(data + '-----------------------------' + '\n')


def all_page():
    """Return the URLs of all ten list pages (start=0, 25, ..., 225)."""
    base_url = 'https://book.douban.com/top250?start='
    # 250 books, 25 per page -> offsets 0..225 in steps of 25
    return [base_url + str(offset) for offset in range(0, 250, 25)]


filename = '豆瓣图书top250.txt'
# `with` guarantees the file is closed even if scraping raises mid-way,
# unlike the original open()/close() pair which leaked the handle on error.
# `f` stays a module-level name, so html_parse()'s global reference still works.
with open(filename, 'w', encoding='utf-8') as f:
    html_parse()
print('保存成功!')
