# coding=utf-8
# Import the libraries used by this scraper
import re
import xlwt#创建excel表格
import requests
from bs4 import BeautifulSoup

# Fetch a web page and return its HTML text for parsing
def getHtml(url):
    """Download *url* and return the response body as text.

    Sends a desktop-browser User-Agent so Douban does not reject the
    request as a bot.

    Raises:
        requests.HTTPError: on a non-2xx response (via raise_for_status).
        requests.Timeout: if the server does not answer within 10 seconds
            (the original version could hang forever with no timeout).
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0'}
    page = requests.get(url, headers=headers, timeout=10)
    # Fail loudly instead of silently scraping an error page.
    page.raise_for_status()
    return page.text


if __name__ == '__main__':
    # One workbook, one sheet; header labels go on row 2, data rows
    # start at row 3.
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet('豆瓣图书Top250')

    # Column layout: 2=title, 3=author, 4=translator, 5=publisher,
    # 6=publication date, 7=price, 8=rating, 9=vote count, 10=quote.
    header_labels = ['书名', '作者', '译者', '出版单位', '出版时间',
                     '定价', '豆瓣评分', '评价人数', '一句话']
    for col, label in enumerate(header_labels, start=2):
        sheet.write(2, col, label)

    # Independent row cursors for the four scraped sections (titles,
    # publication info, ratings, quotes) — some books lack a quote, so
    # the cursors can drift apart.
    i = 3
    j = 3
    k = 3
    m = 3

    # Douban pages the list 25 books at a time (start=0, 25, 50, 75).
    # NOTE(review): only the first 100 entries are fetched; use
    # range(0, 250, 25) to collect the full Top 250.
    for page in range(0, 100, 25):
        url = 'https://book.douban.com/top250?start={0}'.format(page)
        soup = BeautifulSoup(getHtml(url), 'html.parser')

        # Book titles: one <div class="pl2"> per book, title in its <a>.
        for name in soup.find_all('div', class_='pl2'):
            book = name.find('a').text.strip().replace(' ', '')
            sheet.write(i, 2, book)
            i += 1

        # Publication info, slash-separated:
        # "author / [translator /] publisher / date / price".
        for info in soup.find_all('p', class_='pl'):
            authorinfo = info.text
            fields = authorinfo.split('/')
            if len(fields) < 4:
                # Entry without a separate author field: put whatever is
                # present into the publisher/date/price columns.
                # (Guarded with a slice — the original indexed
                # fields[0..2] unconditionally and could IndexError.)
                for offset, value in enumerate(fields[:3]):
                    sheet.write(j, 5 + offset, value)
                j += 1
                continue
            sheet.write(j, 3, fields[0])  # author
            r = 1
            if authorinfo.count('/') == 4:
                # Five fields => a translator is present.
                sheet.write(j, 4, fields[r])
                r += 1
            sheet.write(j, 5, fields[r])      # publisher
            sheet.write(j, 6, fields[r + 1])  # publication date
            sheet.write(j, 7, fields[r + 2])  # price
            j += 1

        # Ratings: span[1] holds the numeric score, span[2] the vote
        # count text, e.g. "(12345人评价)".
        for rating in soup.find_all('div', class_='star clearfix'):
            spans = rating.find_all('span')
            sheet.write(k, 8, spans[1].text)
            votes = re.findall(r'\d+', spans[2].text)
            # BUG FIX: re.findall returns a list, which xlwt cannot
            # write into a cell; store the first matched number (or an
            # empty string if none was found).
            sheet.write(k, 9, votes[0] if votes else '')
            k += 1

        # One-line quotes; books without one are simply skipped, so m
        # may lag behind i.
        for quote in soup.find_all('p', class_='quote'):
            sheet.write(m, 10, quote.text)
            m += 1

    # xlwt always produces the binary .xls format, so save under a
    # matching extension (the original ".csv" name was misleading: the
    # file was never CSV).
    workbook.save(r'C:\Users\hp\Desktop\爬虫数据.xls')