import requests
from lxml import etree
import csv


def get_html(url, params=None):
    """Fetch *url* and return the decoded response body.

    Args:
        url: The URL to request.
        params: Optional dict of query-string parameters. When omitted,
            falls back to the module-level ``data`` dict for backward
            compatibility with the original call sites.

    Returns:
        The response text on success, or ``None`` when the request fails
        (callers must handle the ``None`` case).
    """
    if params is None:
        # Original behavior: the caller sets the module-level ``data``
        # global before calling this function.
        params = data
    head = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36'}
    try:
        # timeout prevents the script from hanging forever on a dead server
        r = requests.get(url=url, params=params, headers=head, timeout=10)
        r.raise_for_status()  # fail fast on HTTP 4xx/5xx before decoding
        r.encoding = r.apparent_encoding  # site may not declare its charset
        return r.text
    except requests.RequestException as err:
        # Narrowed from bare Exception: only network/HTTP errors are expected here.
        print(err)
        return None


def parser(html):
    """Extract book rows from a listing page.

    Args:
        html: Page source as a string, or ``None``/empty when the fetch
            failed (``get_html`` returns ``None`` on error).

    Returns:
        A list of ``[title, author, intro, status]`` rows; empty when
        *html* is falsy or no matching nodes are found.
    """
    out_list = []
    if not html:
        # get_html returns None on failure; don't crash on a bad page.
        return out_list
    doc = etree.HTML(html)
    for row in doc.xpath("//*[@id='book-img-text']/ul/li/div[2]"):
        title = row.xpath("h4/a/text()")
        author = row.xpath("p[1]/a[1]/text()")
        intro = row.xpath("p[2]/text()")
        status = row.xpath("p[3]/span/text()")
        # Skip malformed entries instead of raising IndexError on [0].
        if not (title and author and intro and status):
            continue
        out_list.append([title[0], author[0], intro[0].strip(), status[0]])
    return out_list


def save_csv(item, path):
    """Append the rows in *item* to the CSV file at *path*.

    ``newline=""`` is required by the csv module: without it, the text-mode
    newline translation on Windows inserts a blank line after every record.

    Args:
        item: Iterable of rows, each row an iterable of field values.
        path: Destination CSV path; the file is created if missing and
            appended to otherwise.
    """
    with open(path, "a+", encoding="utf-8", newline="") as f:
        csv.writer(f).writerows(item)


if __name__ == '__main__':
    # Loop-invariant settings hoisted out of the page loop.
    base_url = "http://www.bspider.top/qidian/?"
    out_path = "d:\\xiaoshuo.csv"
    # Scrape listing pages 1-5 and append each page's rows to the CSV file.
    for page in range(1, 6):
        # NOTE: get_html reads this module-level dict as its query params,
        # so it must be (re)bound before every call.
        data = {"page": page}
        html = get_html(base_url)
        if html is None:
            # Fetch failed (already logged by get_html); skip this page
            # instead of crashing the whole run.
            continue
        rows = parser(html)
        if rows:
            save_csv(rows, out_path)
