import requests
from lxml import etree
from proxy_helper import ProxyHelper
import re

# Scrape pages 1-13 of Douban's "latest books" chart through a rotating
# proxy and print title / publication info / score for each entry.
proxy_helper = ProxyHelper()

# Request pieces that do not change per page — build them once, not
# inside the loop.
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
}
url = "https://book.douban.com/latest"

# Whitespace-stripping pattern, precompiled once instead of per item.
_WS = re.compile(r"\s")


def _first_text(node, xpath):
    """Return the first text() match of *xpath* under *node*, or '' if none.

    The previous code indexed ``[0]`` unconditionally, which raised
    IndexError whenever a list item's markup deviated from the expected
    structure; an empty string keeps the scrape going.
    """
    matches = node.xpath(xpath)
    return matches[0] if matches else ""


for page in range(1, 14):
    params = {
        "subcat": "全部",
        "p": page,
    }
    response = proxy_helper.requests_with_proxy(url, params=params, headers=headers)
    # .decode() defaults to UTF-8, which Douban serves.
    root = etree.HTML(response.content.decode())
    for li in root.xpath("//ul[@class='chart-dashed-list']/li"):
        title = _first_text(
            li, "./div[@class='media__body']/h2[@class='clearfix']/a[@class='fleft']/text()")
        # Publication info comes with layout whitespace; strip it all.
        info = _WS.sub("", _first_text(
            li, ".//div[@class='media__body']/p[@class='subject-abstract color-gray']/text()"))
        # Score may be absent for brand-new books; join() yields '' then.
        score = "".join(li.xpath(
            "./div[@class='media__body']/p[@class='clearfix w250']/span[@class='font-small color-red fleft']/text()"))
        print(title, info, score)
