import requests
from requests.exceptions import RequestException
import time,json
from bs4 import BeautifulSoup

#Fetch the HTML of the page at the given url
def getPage(url):
    """Fetch *url* and return the response body as text.

    Returns the page text on HTTP 200, otherwise None. Network errors
    (including timeouts) are printed and also yield None, so callers
    can treat any falsy result as "page unavailable".
    """
    try:
        # A timeout is required: without one a dead host would hang
        # the whole crawl indefinitely.
        res = requests.get(url, timeout=10)
        if res.status_code == 200:
            return res.text
        return None
    except RequestException as err:
        print(err)
        return None

#Parse the crawled page content and yield one record per book
def parsePage(content):
    """Parse a Douban Top-250 book page and yield one dict per book.

    Each yielded dict carries the title, quote, publish info, rating and
    cover-image URL (keys keep the original Chinese field names written
    to the output file).
    """
    #===========BeautifulSoup===========
    soup = BeautifulSoup(content, "lxml")
    items = soup.find_all(name="tr", attrs={"class": "item"})
    for item in items:
        # Some books have no quote element, in which case find() returns
        # None and .string raises AttributeError — fall back to "".
        # (Catch only AttributeError; a bare except would also swallow
        # KeyboardInterrupt and hide real bugs.)
        try:
            quote = item.find(name="span", attrs={"class": 'inq'}).string
        except AttributeError:
            quote = ""
        yield {
            "书名": item.select("div.pl2 a")[0].get_text().strip(),
            "引言": quote,
            "信息": item.find(name="p", attrs={"class": 'pl'}).string,
            "评分": item.find(name="span", attrs={"class": 'rating_nums'}).string,
            "图书封面": item.find(name="img", attrs={'width': '90'}).attrs["src"],
        }

#Append one record to the result file
def writeFile(content):
    """Serialize *content* as one JSON line and append it to ./result_bs4.txt."""
    line = json.dumps(content, ensure_ascii=False)
    with open("./result_bs4.txt", mode="a", encoding="utf-8") as out:
        out.write(f"{line}\n")

#Driver for one result page: fetch, parse, persist
def main(offset):
    """Crawl the Top-250 page starting at *offset* and write every record."""
    page_url = f"https://book.douban.com/top250?start={offset}"
    content = getPage(page_url)
    if not content:
        return
    for record in parsePage(content):
        writeFile(record)

#Script entry point: crawl all 10 pages (25 books each), pausing between requests
if __name__ == '__main__':
    for start in range(0, 250, 25):
        main(offset=start)
        time.sleep(1)  # be polite to the server between page fetches