import pymysql
import  requests
from bs4 import BeautifulSoup
import csv
from urllib.parse import urljoin
def get_html(url):
    """Fetch *url* and return the response body as decoded text.

    Sends a desktop-Chrome User-Agent header (some sites block the default
    `python-requests` UA) and uses `apparent_encoding` so pages that
    mislabel their charset still decode correctly.

    Returns:
        str: the page HTML on success, or None if the request failed
        (the error is printed, matching the original best-effort style).
    """
    head = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36'}
    try:
        # BUG FIX: `head` was built but never passed, so the UA spoofing
        # silently did nothing. Also add a timeout so a stalled server
        # cannot hang the scraper indefinitely.
        r = requests.get(url, headers=head, timeout=10)
        # Check the status code first; no point detecting the encoding
        # of an error page we are about to discard.
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except Exception as error:
        print(error)
        return None
def parser(html):
    """Parse a book-listing page and extract one record per book.

    Args:
        html: the page HTML as a string (as produced by get_html).

    Returns:
        list[list[str]]: rows of [book name, author, price, absolute
        detail-page URL], one per <li> entry in the listing.
        Returns an empty list when nothing matches the selectors.
    """
    soup = BeautifulSoup(html, 'lxml')
    out_list = []
    # Loop-invariant base for resolving the relative detail-page hrefs.
    base_url = 'https://www.ryjiaoyu.com/book'
    for row in soup.select('tab-book > div.col-md-8.col-sm-8.main > div.g-main > div > ul > li'):
        # The title anchor carries both the book name (title attr) and
        # the relative link (href) — look it up once, not twice.
        anchor = row.select(' div.book-info > h4 > a')[0]
        bookname = anchor.attrs['title'].strip()
        author = row.select(' div.book-info > div > span')[0].text.strip()
        price = row.select('div.book-info > span > span')[0].text.strip()
        new_url = urljoin(base_url, anchor.attrs['href'].strip())
        out_list.append([bookname, author, price, new_url])
    # BUG FIX: the original built out_list but never returned it, so
    # every caller received None and the parse result was lost.
    return out_list
def save_mysql(sql,val,**dbdata):
    try:
        connect=pymysql.Connect(**)