#https://www.ryjiaoyu.com/book
import requests
from lxml import etree
import csv
# import pandas as pd
from urllib.parse import urljoin

def get_html(url):
    """Download *url* and return its HTML text, or None on any request error.

    Sends browser-like headers (User-Agent, Referer, etc.) to avoid trivial
    bot blocking on the target site.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Referer': 'https://www.baidu.com/',
        'Connection': 'keep-alive'}
    try:
        # timeout keeps the script from hanging forever on a dead server
        r = requests.get(url=url, headers=headers, timeout=10)
        r.raise_for_status()
        # guess the real encoding from the body; sites sometimes mislabel
        # the charset in their headers, which garbles Chinese text
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        # narrow to request-related errors; return None explicitly so the
        # caller can detect the failure
        print(e)
        return None

def parser(html):
    """Parse the book-grid page and return a list of rows, each being
    [name, author, price, intro, absolute_url].

    Listings that are missing a field yield an empty string for that field
    instead of crashing the whole scrape.
    """
    tree = etree.HTML(html)
    out_list = []

    def _first(node, path, default=''):
        # xpath() returns a list; a missing element used to raise
        # IndexError via [0] and abort the entire parse
        found = node.xpath(path)
        return found[0] if found else default

    for row in tree.xpath('//div[@class="block-books block-books-grid"]/ul/li'):
        name = _first(row, 'div[@class="book-info"]/h4/a/text()')
        author = _first(row, 'div[@class="book-info"]/div/span/text()').strip()
        price = _first(row, 'div[@class="book-info"]/span/span/text()')
        intro = _first(row, 'div[2]/p/text()')
        houban = _first(row, 'div[2]/h4/a/@href')
        # urljoin resolves the (possibly relative) href against the site root
        full_url = urljoin('https://www.ryjiaoyu.com/', houban)
        out_list.append([name, author, price, intro, full_url])
    return out_list

def save_info(path, content, header=None):
    """Write the rows in *content* to a CSV file at *path*.

    path: destination file path.
    content: iterable of rows (each row an iterable of cell values).
    header: optional column-name row written first; default None keeps the
        original headerless output.
    """
    # 'w' instead of 'w+': we only write, never read back.
    # newline='' is required so the csv module controls line endings itself.
    with open(path, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        if header:
            writer.writerow(header)
        writer.writerows(content)

if __name__ == '__main__':
    url = 'https://www.ryjiaoyu.com/book'
    html = get_html(url)
    # get_html returns None when the request fails; passing None on to the
    # parser would crash inside etree.HTML, so guard before continuing
    if html:
        out_list = parser(html)
        save_info('书目信息.csv', out_list)
    else:
        print('download failed, nothing saved')