import csv
from urllib import parse  # NOTE(review): original line was truncated ("from urllib p"); confirm intended name

import requests
from lxml import etree
def get_html(url):
    """Fetch *url* and return its decoded HTML text, or None on failure.

    Args:
        url: The page URL to download.

    Returns:
        The response body as text, or None if the request failed
        (matching the original's implicit-None error behavior).
    """
    head = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36"}
    try:
        # Bug fix: a missing timeout lets a dead server hang the script forever.
        r = requests.get(url=url, headers=head, timeout=10)
        # Check the HTTP status before bothering to decode the body.
        r.raise_for_status()
        # apparent_encoding sniffs the charset from the payload, which is more
        # reliable than the (often missing) Content-Type header on this site.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        # Narrowed from bare Exception: only network/HTTP errors are expected here.
        print(e)
        return None


def parser(html):
    """Extract one [title, author, price] row per book from the listing page.

    Args:
        html: HTML source of the book-list page, or None if the fetch failed.

    Returns:
        A list of 3-element lists, one per matched <li> entry; empty if html
        is None or nothing matches the XPath.
    """
    # Guard: etree.HTML(None) raises, and a failed fetch returns None.
    if html is None:
        return []
    doc = etree.HTML(html)
    out_list = []
    # Bug fix: the loop iterated `etree.xpath(...)` (the module has no .xpath);
    # the query must run against the parsed document `doc`.
    for item in doc.xpath('//*[@id="tab-book"]/div[2]/div[3]/div/ul/li'):
        # Positional [0] assumes each sub-path matches at least once per <li>;
        # presumably title / author / price — TODO confirm against live markup.
        row = [
            item.xpath('div[2]/h4/a/text()')[0],
            item.xpath('div[2]/div/span/text()')[0].strip(),
            item.xpath('div[2]/span/span/text()')[0],
        ]
        # Bug fix: original appended undefined name `row_data` (NameError).
        out_list.append(row)
    return out_list


def save_csv(path, concent):
    """Append a single CSV row to *path*.

    Args:
        path: Destination CSV file path (created if absent).
        concent: Sequence of cell values forming one row.
    """
    # Bug fixes: the original swapped open()'s file and mode arguments
    # (open("a+", path, ...)), and used newline=' ' (a space) — the csv
    # module requires newline='' to avoid blank lines on Windows.
    with open(path, "a+", encoding="utf-8", newline="") as f:
        csv.writer(f).writerow(concent)



if __name__ == '__main__':
    # Fetch the listing page, parse every book entry, and persist them as CSV.
    url = 'https://www.ryjiaoyu.com/book'
    yuanma = get_html(url)
    out_list = parser(yuanma)
    # Bug fix: save_csv() was called with no arguments (TypeError — it takes
    # path and concent). save_csv writes one row per call, so iterate the rows.
    for row in out_list:
        save_csv('books.csv', row)





















