import re
import urllib.error
import urllib.request

import pymysql


# --- Part 1: MySQL storage setup --------------------------------------------
# Connect and (re)initialise the `scrape` database and `quotes` table.
# charset="utf8mb4" makes the *connection* encoding explicit; without it the
# effective client charset depends on server defaults and scraped text can be
# silently mangled on insert.
con = pymysql.Connect(user="root", password="123456", charset="utf8mb4")
cur = con.cursor()
# MySQL's legacy "utf8" is a 3-byte subset of Unicode; utf8mb4 covers the
# full range (e.g. emoji in quote text).
cur.execute("create database if not exists scrape charset=utf8mb4")
cur.execute("use scrape")
cur.execute(
    'CREATE TABLE IF NOT EXISTS quotes ('
    'id INT AUTO_INCREMENT PRIMARY KEY,'
    'quote_text TEXT,'
    'author_name VARCHAR(255),'
    'tags TEXT)'
)
# Clear any rows left over from a previous run so the table holds exactly
# one scrape's worth of data.
cur.execute('delete from quotes')
con.commit()

# Scrape up to 9 listing pages with urllib + regex and insert each quote
# into the MySQL `quotes` table. Stops early at the first empty page.
for page_num in range(1, 10):
    url = f"https://quotes.toscrape.com/page/{page_num}/"
    try:
        # timeout keeps a stalled server from hanging the script forever.
        with urllib.request.urlopen(url, timeout=10) as response:
            html = response.read().decode('utf-8')
            # re.S on every pattern so '.' also matches newlines, in case a
            # captured value wraps across lines in the served HTML.
            quotes = re.findall(r'<span class="text" itemprop="text">“(.*?)”</span>', html, re.S)
            authors = re.findall(r'<small class="author" itemprop="author">(.*?)</small>', html, re.S)
            tags = re.findall(r'<meta class="keywords" itemprop="keywords" content="(.*?)" /', html, re.S)
            if not quotes:
                # No quotes on this page -> we are past the last page.
                break
            for quote, author, tags_str in zip(quotes, authors, tags):
                # Normalise "a,b,c" to "a, b, c" (stripping stray whitespace
                # around each tag) for readability in the DB.
                tag_str_joined = ", ".join(t.strip() for t in tags_str.split(","))
                cur.execute(
                    "INSERT INTO quotes (quote_text, author_name, tags) VALUES (%s, %s, %s)",
                    (quote, author, tag_str_joined),
                )
            # Commit once per page rather than per row.
            con.commit()
    except urllib.error.URLError as e:
        print(f"访问 {url} 时发生网络错误: {e}")
    except Exception as e:
        print(f"处理 {url} 时出现未知错误: {e}")

cur.close()
con.close()

import requests
from lxml import html
from pymongo import MongoClient


# --- Part 2: MongoDB storage setup ------------------------------------------
# Connect to the local MongoDB instance and select the target collection.
client = MongoClient("mongodb://localhost:27017/")
db = client["scrape"]
collection = db["quotes"]
# Start from an empty collection so each run stores exactly one scrape.
collection.delete_many({})

# Scrape up to 9 listing pages with requests + lxml XPath and store each
# quote as a MongoDB document. Stops early at the first empty page.
for page_num in range(1, 10):
    url = f"https://quotes.toscrape.com/page/{page_num}/"
    try:
        # timeout keeps a stalled server from hanging the script forever.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        tree = html.fromstring(response.text)
        quotes = tree.xpath('//span[@class="text"]/text()')
        authors = tree.xpath('//small[@class="author"]/text()')
        tags_list = tree.xpath('//meta[@class="keywords"]/@content')
        if not quotes:
            # No quotes on this page -> we are past the last page.
            break
        docs = [
            {
                # The text nodes include the decorative curly quotes; drop them.
                'quote_text': quote.strip('“”'),
                'author_name': author,
                'tags': tags_str.split(','),
            }
            for quote, author, tags_str in zip(quotes, authors, tags_list)
        ]
        # One batched round-trip per page instead of one insert per quote.
        if docs:  # insert_many raises InvalidOperation on an empty list
            collection.insert_many(docs)
    except requests.RequestException as e:
        print(f"访问 {url} 时发生网络错误: {e}")
    except Exception as e:
        print(f"处理 {url} 时出现未知错误: {e}")
client.close()
