"""
author：fc
date：  2021/9/22
"""
# 尝试多线程的使用
# https://www.qiushibaike.com/text/
# 使用：A线程爬取奇数页信息，B线程爬取偶数页信息，以加快爬取速度

import re
from urllib.request import Request, urlopen
import threading

from util import constant_data


def get_url_data(url_str):
    """
    Fetch the raw page content for *url_str*.

    :param url_str: absolute URL to download
    :return: response body decoded as UTF-8 (undecodable bytes ignored)
    """
    request = Request(url_str, headers=constant_data.headers_firefox)
    # Use a context manager so the HTTP response is closed deterministically
    # instead of being leaked until garbage collection.
    with urlopen(request) as response:
        return response.read().decode("utf-8", "ignore")


def data_filter(data):
    """
    Extract joke texts from raw qiushibaike HTML.

    :param data: raw HTML string (may be empty)
    :return: list of joke strings with ``<br/>`` tags stripped; an empty
        list when *data* is empty. (Previously returned the string
        "空数据" for empty input, which the caller's index loop would then
        iterate character by character — inconsistent return types.)
    """
    # Guard clause: empty input yields an empty result of the same type
    # as the normal path, so callers can always iterate the return value.
    if not data:
        return []
    # Raw string avoids the invalid-escape-sequence warning for \s.
    pat = r'<div class="content">\n<span>\s\n*(.*?)\s\n*</span>'
    matches = re.compile(pat).findall(data)
    # str.replace suffices for a fixed literal; no regex needed here.
    return [item.replace("<br/>", '') for item in matches]


class test_thread(threading.Thread):
    """Worker thread that runs the full crawl, tagging output with its name.

    NOTE(review): despite the module header about an odd/even page split,
    each thread runs the same ``data_crawling`` over pages 1-9, so two
    threads duplicate the same work — confirm whether a split was intended.
    """

    def __init__(self, i):
        # Initialise the Thread machinery first, then store our tag.
        super().__init__()
        self.i = i  # label used to prefix this thread's console output

    def run(self):
        # Executed in the new thread once start() is called.
        print(f"--------线程{self.i}开始执行--------")
        data_crawling("当前执行线程：" + self.i)


def data_crawling(console_log):
    """
    Crawl pages 1-9 of the joke listing and print every extracted joke.

    :param console_log: prefix string identifying the calling thread
    :return: None (all output goes to stdout)
    """
    base_url = "https://www.qiushibaike.com/text/page/"
    # NOTE(review): every caller crawls the same page range, so multiple
    # threads duplicate work rather than splitting odd/even pages as the
    # module header suggests — consider start/step parameters per thread.
    for page in range(1, 10):
        raw_html = get_url_data(base_url + str(page))
        jokes = data_filter(raw_html)
        # enumerate replaces the C-style index loop over range(len(...));
        # printed output is byte-identical to the original.
        for idx, joke in enumerate(jokes):
            print()
            print(f"{console_log}=======第{page}页的第{idx}个段子内容是：\n{joke}")


def thread_test():
    """
    Start two crawler threads and wait for both to finish.

    :return: None
    """
    workers = []
    # Fixed typo in the local name (was "theads"); also keep a reference
    # to every started thread — previously the second instance overwrote
    # the only variable holding the first.
    for name in ("thread_A", "thread_B"):
        worker = test_thread(name)
        worker.start()
        workers.append(worker)
    # Join so the caller knows the crawl is complete on return; the
    # original never joined its threads.
    for worker in workers:
        worker.join()

if __name__ == '__main__':
    # Script entry point: launch the two crawler threads.
    thread_test()
