#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :2.1.1 糗事百科（进程池）.py
# @Time      :2020/7/15 23:39
# @Author    :亮亮
# @说明       :糗事百科改版后只能看两页
# @总结       :
import requests
from fake_useragent import UserAgent
import re
import time
import multiprocessing


def get_html(url):
    """Fetch *url* and return the response body as text.

    Args:
        url: page URL to download.

    Returns:
        The decoded response body (str).
    """

    # Randomized User-Agent so requests look like a regular browser.
    headers = {
        "User-Agent": UserAgent().random
    }

    # Proxy support, currently disabled.
    # proxies = {
    #     "http": "117.136.106.46"
    # }

    # timeout keeps a pool worker from hanging forever on an unresponsive
    # server (the original call had no timeout at all).
    response = requests.get(url, headers=headers, timeout=10)

    return response.text


def re_html(html, str_title, content):
    """Extract content snippets from one page of HTML.

    Args:
        html: the page's HTML text.
        str_title: title regex pattern (currently unused; kept so the
            signature stays compatible with existing callers).
        content: regex pattern with one capture group for the content.

    Returns:
        List of captured content strings (empty list when nothing matches).
    """
    # NOTE: the original wrapped the pattern in r"{}".format(content); the
    # raw prefix only affects the "{}" literal, not the interpolated pattern,
    # so the pattern is passed through unchanged here.
    # title = re.findall(str_title, html)
    matches = re.findall(content, html)
    return matches


def save_html(name, content):
    """Append every snippet in *content* to file *name*.

    Each snippet is followed by three newlines so entries stay visually
    separated. The file is opened in append mode with UTF-8 encoding.
    """
    with open(name, "a", encoding="utf-8") as fp:
        fp.writelines(snippet + "\n\n\n" for snippet in content)


def main2(new_url, i):
    """Download page *i* at *new_url*, extract jokes, append them to 段子.txt.

    Runs as a single task inside the multiprocessing pool.
    """

    # Fetch the page.
    html = get_html(new_url)
    print("正在打印第{}页".format(i))

    # Stop early on an empty page — the original only printed a warning and
    # then ran the regex extraction anyway.
    if not html:
        print("此页为空白")
        return

    # Raw strings so \s is a regex escape rather than a (deprecated)
    # string-literal escape; pattern values are unchanged.
    title_pattern = '<span class="title"><a target="_blank" href=".*?">(.*?)</a></span>'
    # content_pattern = r'<dd class="content">\s*?<p>(.+)</p>'
    content_pattern = r'<dd class="content">\s*<p>\s*(.+)\s*</p>\s*'
    results = re_html(html, title_pattern, content_pattern)

    # Append everything to the shared output file.
    # name = "第{}页.txt".format(i)
    save_html("段子.txt", results)

    # 暂停一秒


def main():
    """Prompt for a page count, then crawl pages 1..N with a 5-worker pool."""
    base_url = "http://qiushidabaike.com/index_{}.html"
    page_count = int(input("输入要爬的页数:"))

    # One download task per page, fanned out over five worker processes.
    pool = multiprocessing.Pool(5)
    for page in range(1, page_count + 1):
        pool.apply_async(main2, args=(base_url.format(page), page))

    # No more tasks; wait for every worker to finish.
    pool.close()
    pool.join()

    print("爬取结束")


# Entry point: run the crawler only when the file is executed directly
# (also required on Windows for multiprocessing to spawn workers safely).
if __name__ == "__main__":
    main()
