"""
author：fc
date：  2021/9/22
"""
# 网址：https://weixin.sogou.com/,ip感觉被封了

# 引入外部定义模块
import time  # 延时模块
import re
from urllib import error, request
# 引入自定义模块
from util import constant_data


def use_proxy(proxy_addr, url):
    """
    Fetch *url* through an HTTPS proxy so the crawler's own IP is not blocked.

    :param proxy_addr: proxy server address, e.g. "127.0.0.1:8888"
    :param url: the URL to fetch
    :return: the decoded page body on success, or "" on failure
             (a string is always returned so callers can safely write/regex it)
    """
    try:
        # Install a global opener that routes HTTPS traffic through the proxy
        # (this is process-wide: every later urlopen() call uses it).
        proxy = request.ProxyHandler({"https": proxy_addr})
        opener = request.build_opener(proxy, request.HTTPSHandler)
        request.install_opener(opener)
        # Send a Firefox User-Agent so the site treats the request as a browser.
        req = request.Request(url, headers=constant_data.headers_firefox)
        # "ignore" drops undecodable bytes instead of raising UnicodeDecodeError.
        data = request.urlopen(req).read().decode("utf-8", "ignore")
        return data
    except error.URLError as e:
        # Network/HTTP failure: log it and back off 5s before the caller retries.
        print(e)
        time.sleep(5)
    except Exception as e:
        # Last-resort guard so one bad page doesn't kill the whole crawl.
        print(e)
    # Bug fix: was an implicit None, which crashed callers doing fh.write(result).
    return ""


# Script configuration
key = "python" # search keyword
url="https://weixin.sogou.com/weixin?type=2&query=" # search endpoint (page number appended per request)
proxy = "127.0.0.1:8888"  # proxy server, relayed through fiddler
sougou_list_python=[]
if __name__ == '__main__':
    # Bug fix: quote the keyword ONCE. The original re-quoted inside the loop,
    # which double-encodes any keyword containing non-ASCII/special characters.
    encoded_key = request.quote(key)
    # Bug fix: raw string (the original '\s' is an invalid escape and emits a
    # DeprecationWarning). Compiled once outside the loop; re.S lets "." match newlines.
    link_pattern = re.compile(r'<h3>\n\s*<a .*? href="(.*?)"', re.S)
    for i in range(0, 10):
        page_url = url + encoded_key + "&page=" + str(i)
        page_data = use_proxy(proxy, page_url)
        print(len(str(page_data)))
        rs1 = link_pattern.findall(str(page_data))  # article hrefs from the result list
        print(rs1)
        rs1 = ["https://weixin.sogou.com/" + strin for strin in rs1]  # make links absolute
        print(rs1)
        if not rs1:
            print("此次（"+str(i)+"页没成功")
            continue
        for j in range(0, len(rs1)):
            article_content = use_proxy(proxy, rs1[j])
            # Bug fix: a failed fetch returns a falsy value; skip it instead of
            # writing None/"" to disk (fh.write(None) raises TypeError).
            if not article_content:
                continue
            with open("../thesefiles/微信搜狗文章/python关键词-第"+str(i)+"页第"+str(j)+"篇文章.html","w",encoding="utf-8") as fh:
                fh.write(article_content)

