from lxml import etree
import requests
import pymongo


# class ToScrapeSpider:
#     def __init__(self, url):
#         self.url = url
#         self.client = pymongo.MongoClient("mongodb://localhost:27017/")
#         self.client.drop_database("toscrape")
#         self.db = self.client["toscrape"]
#         self.collection = self.db["toscrape"]
#
#
#     def __request(self):
#         self.__res = requests.get(self.url)
#         return self.__res.text
#
#     def __parse(self, content):
#         self.__tree = etree.HTML(content)
#         self.__items = self.__tree.xpath('//div[@class="quote"]')
#         datas = []
#         for item in self.__items:
#             content = item.xpath('./span[@class="text"]/text()')[0]
#             author = item.xpath('.//small/text()')[0]
#             tags = item.xpath('.//a[@class="tag"]/text()')
#             datas.append({
#                 "content": content[1:-1],
#                 "author": author,
#                 "tags": tags
#             })
#         return datas
#
#     def __save(self, datas):
#         self.collection.insert_many(datas)
#         self.client.close()
#
#
#     def run(self):
#         content = self.__request()
#         datas = self.__parse(content)
#         self.__save(datas)
#
#
# page = 1
# url = f'https://quotes.toscrape.com/page/{page}/'
# spider = ToScrapeSpider(url)
# spider.run()


# Crawl every page of quotes.toscrape.com, following the "next" pagination
# link until it disappears, and accumulate one dict per quote in `datas`:
#   content: quote text with the surrounding curly quotes stripped
#   author:  author name
#   tags:    list of tag strings
datas = []
page = 1

while True:
    url = f'https://quotes.toscrape.com/page/{page}/'
    # timeout: a stalled connection must not hang the scraper forever.
    # raise_for_status: without it a non-200 response would be parsed as an
    # (empty) page — no quotes, no "next" button — and the crawl would stop
    # silently with truncated data instead of reporting the failure.
    res = requests.get(url, timeout=10)
    res.raise_for_status()
    tree = etree.HTML(res.content)
    items = tree.xpath('//div[@class="quote"]')

    for item in items:
        # The <span class="text"> child holds the quote wrapped in “…”.
        content = item.xpath('./span[@class="text"]/text()')[0]
        author = item.xpath('.//small/text()')[0]
        tags = item.xpath('.//a[@class="tag"]/text()')
        datas.append({
            "content": content[1:-1],  # strip the surrounding curly quotes
            "author": author,
            "tags": tags
        })

    print(f"爬取第{page}页结束")

    # The last page carries no <li class="next"> element — stop there.
    next_btn = tree.xpath('//li[@class="next"]')
    if next_btn:
        page += 1
    else:
        break