"""
单线程爬虫
确定url地址
https://www.qiushibaike.com/text/page/1/
https://www.qiushibaike.com/text/page/2/
抓取所有的数据
"""
import time

import requests
from lxml import etree


class QiuBaiSpider(object):
    """Single-threaded scraper for the qiushibaike.com text-joke listing.

    Crawls pages 1..PAGE_COUNT of https://www.qiushibaike.com/text/page/N/,
    extracts the poster's username from each joke block, and prints the
    collected records.
    """

    # Number of listing pages to crawl.
    PAGE_COUNT = 13
    # Seconds to pause between page requests so we don't hammer the server.
    REQUEST_DELAY = 3
    # Seconds before a hung connection/read is abandoned.
    REQUEST_TIMEOUT = 10

    def __init__(self):
        # "{}" is filled in with the page number by get_url_list().
        self.base_url = "https://www.qiushibaike.com/text/page/{}/"
        # Browser-like User-Agent so the site serves the normal HTML page.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36"
        }

    def get_url_list(self):
        """Build the list of listing-page URLs to crawl.

        :return: list of PAGE_COUNT URLs for pages 1..PAGE_COUNT
        """
        return [self.base_url.format(i) for i in range(1, self.PAGE_COUNT + 1)]

    def send_request(self, url):
        """Fetch one URL and return the raw response body.

        :param url: page URL to request
        :return: response body as bytes
        :raises requests.RequestException: on connection failure or timeout
        """
        # timeout prevents a dead connection from hanging the whole crawl.
        response = requests.get(url, headers=self.headers,
                                timeout=self.REQUEST_TIMEOUT)
        return response.content

    def parse_data(self, response):
        """Parse one listing page into a list of item dicts.

        :param response: raw HTML bytes of a listing page
        :return: list of {"username": <poster name>} dicts
        """
        # Convert the HTML bytes into an element tree.
        html = etree.HTML(response)
        # Each joke sits in its own <div> under the listing container.
        div_list = html.xpath("//div[@class='col1 old-style-col1']/div")
        content_list = []
        for div in div_list:
            # Some divs (ads, layout filler) have no <h2>; skip them
            # instead of crashing with IndexError on [0].
            names = div.xpath(".//h2/text()")
            if not names:
                continue
            content_list.append({"username": names[0].strip()})

        return content_list

    def save_data(self, content_list):
        """Persist the scraped records (currently just prints them).

        :param content_list: list of item dicts from parse_data()
        :return: None
        """
        for content in content_list:
            print(content)

    def run(self):
        """Crawl every page: build URLs, fetch, parse, and save each one."""
        # 1. Prepare the PAGE_COUNT listing-page URLs.
        url_list = self.get_url_list()
        # 2. Visit each URL in turn, politely pausing between requests.
        for url in url_list:
            time.sleep(self.REQUEST_DELAY)
            response = self.send_request(url)
            # 3. Extract the data.
            content_list = self.parse_data(response)
            # 4. Save (print) the data.
            self.save_data(content_list)


if __name__ == '__main__':
    # Entry point: build the spider and crawl all pages.
    QiuBaiSpider().run()
