import csv
import sqlite3
import requests
from lxml import etree


# 自定义一个函数对方法进行封装，这是请求并且得到网页html函数
def get_html(url):
    """Fetch *url* and return its HTML text, or ``None`` on failure.

    Args:
        url: Absolute URL of the page to download.

    Returns:
        The response body as text, or ``None`` if the request failed
        (network error, timeout, or non-2xx status).
    """
    # Browser-like headers: without a real User-Agent the site rejects
    # the request (the original comment notes scraping fails otherwise).
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                      "AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/103.0.5060.114 Safari/537.36 Edg/103.0.1264.62"
    }
    try:
        # timeout prevents the scraper from hanging forever on a dead host.
        response = requests.get(url, headers=headers, timeout=10)
        # Treat 4xx/5xx as failures instead of silently returning an
        # error page's HTML to the parser.
        response.raise_for_status()
        return response.text
    except requests.RequestException as error:
        print("网页获取失败！")
        print(error)
        return None


# 自定义一个解析函数，得到了网页数据就要对其进行解析
def parse(page=1):
    """Scrape one listing page of ryjiaoyu.com books into 人邮图书.csv.

    Args:
        page: 1-based page number of the book list to scrape. Defaults
            to 1, matching the original hard-coded behavior.

    Returns:
        The list of ``[name, link, author, price]`` rows that were
        written to the CSV; an empty list if the page could not be
        fetched.
    """
    result_list = []

    url = f"https://www.ryjiaoyu.com/book?page={page}"
    html = get_html(url)
    # get_html returns None on failure; bail out instead of handing
    # None to lxml (which raises an opaque ValueError).
    if html is None:
        return result_list

    tree = etree.HTML(html)
    # Each matched element is the info <div> of one book card.
    items = tree.xpath("//div[@class='container']/div/div/div/div/ul/li/div[2]")
    for row in items:
        # Book title.
        name = row.xpath("h4/a/text()")[0]
        # Detail-page link (href is relative, so prefix the site root).
        url_two = "https://www.ryjiaoyu.com/" + row.xpath("h4/a/@href")[0]
        # Author string carries surrounding whitespace in the markup.
        author = row.xpath("div/span/text()")[0].strip()
        price = row.xpath("span/span/text()")[0]
        result_list.append([name, url_two, author, price])

    # Persist the scraped rows; "w" overwrites any previous run's file.
    with open("人邮图书.csv", "w", newline="", encoding="utf-8") as f:
        csv.writer(f).writerows(result_list)
    return result_list

def create_insert_table():
    """Create the ``books`` table (if missing) and load 人邮图书.csv into it.

    Reads every row of the CSV produced by ``parse`` and inserts it into
    ``books.db``. SQLite's REAL affinity converts the numeric price text
    to a float on insert.

    Raises:
        sqlite3.Error: On database failures.
        OSError: If the CSV file cannot be opened.
    """
    conn = sqlite3.connect('books.db')
    try:
        c = conn.cursor()
        # IF NOT EXISTS makes the function safe to run more than once;
        # the original raised OperationalError on the second call.
        c.execute('''CREATE TABLE IF NOT EXISTS books
                     (title text, link text, author text, price real)''')
        with open('人邮图书.csv', newline='', encoding='utf-8') as csvfile:
            # executemany consumes the reader lazily — one prepared
            # statement instead of a Python-level execute per row.
            c.executemany("INSERT INTO books VALUES (?, ?, ?, ?)",
                          csv.reader(csvfile))
        conn.commit()
    finally:
        # Close even if the CSV read or an insert fails (the original
        # leaked the connection on any exception).
        conn.close()