from lxml import etree
import requests
import csv
import pandas
def get_html(url=None):
    """Download a page and return its decoded HTML text.

    Args:
        url: Target URL. When omitted, falls back to the module-level
            ``url`` global (backward compatible with the original script,
            whose ``__main__`` block sets that global before calling).

    Returns:
        The response body as text, or ``None`` when the request fails.
    """
    if url is None:
        # Preserve the original behavior of reading the script-level global.
        url = globals()["url"]
    head = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36"}
    try:
        # timeout prevents the script from hanging forever on a dead server.
        r = requests.get(url=url, headers=head, timeout=10)
        # Fail fast on HTTP errors before touching the body.
        r.raise_for_status()
        # apparent_encoding sniffs the real charset (the site may not send one).
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        # Best-effort: report the failure and let the caller handle None.
        print(e)
        return None
def parser(html):
    """Extract one row of three text fields per book entry from the page.

    Args:
        html: Page HTML text, or ``None``/empty (e.g. when the download
            failed) — in that case an empty list is returned.

    Returns:
        A list of ``[field1, field2, field3]`` rows (presumably
        title / author / price — confirm against the live page markup).
        Entries missing any of the three fields are skipped instead of
        raising ``IndexError`` as the original code did.
    """
    out_list = []
    if not html:
        return out_list
    doc = etree.HTML(html)
    if doc is None:  # etree.HTML returns None for unparseable input
        return out_list
    for row in doc.xpath('//*[@id="tab-book"]/div[2]/div[3]/div/ul/li/div[2]'):
        title = row.xpath('.//h4/a/text()')
        author = row.xpath('.//div/span/text()')
        price = row.xpath('.//span/span/text()')
        # Guard each [0] index: a malformed entry no longer crashes the run.
        if title and author and price:
            out_list.append([title[0], author[0], price[0]])
    return out_list
def save_csv(item, path):
    """Append the given rows to the CSV file at *path*.

    Args:
        item: An iterable of row sequences to write.
        path: Destination CSV file; created if absent, appended to otherwise.
    """
    # newline='' lets the csv module control line endings itself.
    with open(path, "a+", newline='', encoding="utf-8") as out_file:
        csv.writer(out_file).writerows(item)
if __name__ == '__main__':
    # Scrape pages 1-4 of the book list and append all rows to one CSV.
    for page in range(1, 5):
        # NOTE(review): the original loop never used its index, so the SAME
        # page was downloaded four times and duplicate rows were appended.
        # The page number is now passed as a query parameter — confirm the
        # site's pagination parameter is really named ``page``.
        url = f'http://www.ryjiaoyu.com/book?page={page}'
        html = get_html()
        out_list = parser(html)
        save_csv(out_list, "tushu.csv")
