import requests
from lxml import etree
import csv
def get_html(url, time=30):
    """Download *url* and return the decoded response body.

    Args:
        url: page URL to fetch.
        time: request timeout in seconds (default 30).

    Returns:
        The response text on success, or None if the request failed
        (the error is printed).
    """
    head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36"}
    try:
        # Bug fix: the User-Agent header was built but never sent.
        r = requests.get(url, headers=head, timeout=time)
        # Validate the HTTP status before doing any work on the body.
        r.raise_for_status()
        # Sites often mislabel their charset; trust the content sniffer.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        # Narrowed from bare Exception: only network/HTTP errors are
        # expected here; anything else should surface as a real bug.
        print(e)
        return None

def parser(html):
    """Extract book records from a Qidian ranking page.

    Args:
        html: raw HTML text of the listing page.

    Returns:
        A list of rows, each [title, author, intro, latest update].
    """
    tree = etree.HTML(html)
    records = []
    # One <li> per book; the interesting fields live under book-mid-info.
    item_xpath = "//*[@class='book-img-text']//li/*[@class='book-mid-info']"
    for info in tree.xpath(item_xpath):
        title = info.xpath("h4/a/text()")[0]
        author = info.xpath("p[@class='author']/a/text()")[0]
        intro = info.xpath("p[2]/text()")[0].strip()
        latest = info.xpath("p[@class='update']/span/text()")[0]
        records.append([title, author, intro, latest])
    return records

def save_csv(item, path):
    """Append the rows in *item* to the CSV file at *path* (UTF-8).

    The file is opened in append mode, so repeated calls accumulate rows.
    """
    with open(path, "a+", newline="", encoding="utf-8") as out_file:
        writer = csv.writer(out_file)
        for record in item:
            writer.writerow(record)

if __name__ == '__main__':
    # Output path is constant; hoisted out of the per-page loop.
    dizhi = "d:\\SHUJU2.csv"
    # Scrape ranking pages 1..5 and append each page's rows to the CSV.
    for i in range(1, 6):
        url = "http://www.bspider.top/qidian/?page={0}".format(i)
        html = get_html(url)
        # Bug fix: get_html returns None on a failed request; skip the
        # page instead of crashing inside parser(None).
        if html is None:
            continue
        out_list = parser(html)
        save_csv(out_list, dizhi)