#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Crawl WeChat article information from Sogou search.
# Version 2 plan: multi-threaded crawling — link collection, HTML generation,
# a URL queue, and a controller that checks once a minute whether crawling is done.

import re
import urllib.request
import time

# headers = {"User-Agent","Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"}
# # req.add_header()
# opener = urllib.request.build_opener()
# opener.addheaders = [headers]
# # 设置全局变量
# urllib.request.install_opener(opener)
# 装链接
from urllib.error import URLError

myList = []

# 添加代理
def use_proxy(proxy_addr,url):
    import urllib.request
    proxy=urllib.request.ProxyHandler({"http":"http://"+proxy_addr})
    # print (proxy_addr)
    headers=("User-Agent","Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5193.400 QQBrowser/10.0.1066.400")
    opener=urllib.request.build_opener(proxy)
    opener.addheaders=[headers]
    # data=opener.open(url).read().decode('utf-8')
    try:
        response=opener.open(url)
        data=response.read().decode('utf-8')
        # print(data)
        return data
    except URLError as e:
        print("出现异常")
        print(e.reason)
        time.sleep(10)


# 获取所有文章的链接
def getListUrl(pagestart, pageend, key, proxy):
    try:
        # 不需要编码
        page = pagestart
        keywords = urllib.request.quote(key)
        link_pattern = '<div class="txt-box">.*?(http://.*?)"'
        for i in range(pageend):
            page += i
            url = "https://weixin.sogou.com/weixin?query="+ keywords +"&_sug_type_=&sut=20772&lkt=1%2C1558569693364%2C1558569693364&s_from=input&_sug_=n&type=2&sst0=1558569693467&page="+ str(page)+"&ie=utf8&w=01019900&dr=1"
            # print(url)
            url_read = use_proxy(proxy, url)
            # print(url_read)
            # 获取链表
            myList.append(re.compile(link_pattern, re.S).findall(url_read))
            # print(myList)
            print("共获得第"+str(len(myList))+"页")

    except URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
            time.sleep(10)
    except Exception as e:
        print(e)
        time.sleep(1)

def handleUrl(proxy):
    html_head = '''<!DOCTYPE html>
    <html lang="zh-CN">
        <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
        <head>
            <title>微信爬取练习</title>
        </head>
    
    <body>'''

    open_head = open("D:/git/python/com/google/DeepPython/Class4/7.html", "wb")
    open_head.write(html_head.encode("utf-8"))
    open_head.close()

    h2_title = '<h2 class="rich_media_title" id="activity-name">(.*?)</h2>'
    open_content = open("D:/git/python/com/google/DeepPython/Class4/7.html", "ab")
    article_content = 'id="js_content">(.*?)id="js_sg_bar"'

    for i in range(len(myList)):
        # print(len(myList[i]))
        for j in range(len(myList[i])):
            # 初始化标题 内容
            h2_title_partten = "没有标题"
            article_content_partten = "没有内容"
            # 获取每个数组的值 并获取其中的标题和内容
            # print(myList[i][j])
            per_value = myList[i][j]

            # 去掉参数
            per_value = per_value.replace("amp;", "")
            # print(per_value)
            # content_data = use_proxy(proxy, per_value)
            # print(content_data)
            try:
                content_data = urllib.request.urlopen(per_value).read().decode('utf-8')
                # 通过正则获取标题 内容
                # h2_title = 'id="activity-name">(.*?)</h2>'
                # content_data = use_proxy(proxy,per_value)
                # print(content_data)
                h2_title_partten = re.compile(h2_title, re.S).findall(content_data)
                article_content_partten = re.compile(article_content, re.S).findall(content_data)
                # print(h2_title_partten[0].strip())
                # print(article_content_partten)
                if(h2_title_partten != []):
                    current_title = h2_title_partten[0]
                if([] != article_content_partten):
                    current_content = article_content_partten[0]
                data_all = "<p>标题为:"+current_title+"</p><p>内容为:"+current_content+"</p></br>"
                open_content.write(data_all.encode('utf-8'))
                print("第"+str(i)+"个网页,第"+str(j)+"次处理")
            except URLError as e:
                print("解析内容出现异常")
                print(e.reason)
                time.sleep(10)

    open_content.close()
    html12 = '''
        </body>
    <html>'''
    html12_tail = open("D:/git/python/com/google/DeepPython/Class4/7.html", "ab")
    html12_tail.write(html12.encode("utf-8"))
    html12_tail.close()

#  代理 关键字 开始页 结束页
# proxy = "110.73.32.7:6666"
# proxy = "104.236.35.98:8866"
proxy = "112.87.70.204:9999"
# proxy = "112.85.164.219:9999"
# proxy = "58.247.127.145:53281"
# proxy = "113.119.38.158:3128"
# proxy = "121.63.198.84:6668"
# proxy = "112.85.170.23:9999"
# proxy = "112.85.128.210:9999"
# proxy = "112.85.170.23:9999"
# proxy = "27.43.187.35:9999"
# proxy = "	222.189.191.66:9999"


key = "芋道源码"
pagestart = 1
pageend = 3
listUrl = getListUrl(pagestart, pageend, key, proxy)
handleUrl(proxy)