
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
# import requests
# from lxml import etree
# url='http://www.baidu.com'
#
# r=requests.get(url)
# r.encoding=r.apparent_encoding
# y=r.text
# doc=etree.HTML(y)
# ccc=doc.xpath('//input[@id="su"]/@value')[0]
# print(ccc)
import requests
from lxml import etree
import csv
def get_html(url, time=30):
    """Fetch *url* and return the decoded page text, or None on any error.

    NOTE(review): sends the module-level global ``date`` as the query
    parameters — the caller must assign ``date`` before calling this
    function (the ``__main__`` block does so). Consider passing it in
    explicitly instead.

    :param url: page URL to request.
    :param time: request timeout in seconds (default 30).
    :return: response body as ``str``, or ``None`` if the request failed.
    """
    head = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"}
    try:
        # Bug fix: the ``time`` parameter was previously ignored, so the
        # request had no timeout and could block forever on a hung server.
        r = requests.get(url=url, params=date, headers=head, timeout=time)
        # Check the HTTP status before bothering to decode the body.
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except Exception as e:
        # Best effort: report the failure and fall through, returning None.
        print(e)
def parser(html):
    """Parse a listing page and return one 4-field record per book entry.

    Each record is a list of four strings pulled from the entry's
    ``div[2]`` node: the h4 link text, the first link of the first
    paragraph, the stripped text of the second paragraph, and the span
    text of the third paragraph (presumably title / author / intro /
    status — verify against the live page).
    """
    doc = etree.HTML(html)
    records = []
    for entry in doc.xpath('//*[@id="book-img-text"]/ul/li/div[2]'):
        title = entry.xpath("h4/a/text()")[0]
        author = entry.xpath('p[1]/a[1]/text()')[0]
        intro = entry.xpath("p[2]/text()")[0].strip()
        status = entry.xpath("p[3]/span/text()")[0]
        records.append([title, author, intro, status])
    return records
def save_csv(path, content):
    """Append the rows in *content* to the UTF-8 CSV file at *path*.

    Opens in ``a+`` mode, so repeated calls accumulate rows rather
    than overwriting the file.
    """
    with open(path, "a+", newline='', encoding="utf-8") as fh:
        csv.writer(fh).writerows(content)
if __name__ == '__main__':
    # Scrape listing pages 1-5 and append every parsed record to one CSV.
    # Invariants hoisted out of the loop; url/path are the same each pass.
    url = 'http://www.bspider.top/qidian/'
    path = 'd:\\赵华超.csv'
    for t in range(1, 6):
        # get_html reads the module-level global ``date`` as its query
        # parameters, so it must be assigned before each call.
        date = {'page': t}
        html = get_html(url)
        # Bug fix: get_html returns None on a failed request, and
        # parser(None) would raise TypeError — skip the page instead.
        if html is None:
            continue
        out_list = parser(html)
        save_csv(path, out_list)