from lxml import etree
import requests
import json
import time
import copy
from requests import exceptions,ConnectionError
from fake_useragent import UserAgent

requests.packages.urllib3.disable_warnings()

ua = UserAgent()
num = 0
num1 = 0


# Collect the blog post links from the CSDN profile page.
def TitleUrl():
    """Fetch the blog index page and return the list of article URLs.

    Returns:
        list[str]: href values of every article link found on the page.
    """
    url = 'https://blog.csdn.net/weixin_46192679'
    r = requests.request('GET', url, headers={'User-Agent': ua.chrome}, verify=False)
    html = etree.HTML(r.text)
    # xpath() already returns a list of strings, so no manual copy loop is needed.
    return html.xpath('//*[@class="article-item-box csdn-tracking-statistics"]/h4/a/@href')


# Fetch the list of free proxy entries from the jiangxianli API.
def proxy_arr():
    """Query the proxy API and return the raw proxy records.

    Returns:
        list[dict]: proxy entries; each record contains at least 'ip' and 'port'
        (as consumed by run_arr()).
    """
    url = 'https://ip.jiangxianli.com/api/proxy_ips?page=1&country=%E4%B8%AD%E5%9B%BD'
    # Keep the response in its own variable instead of shadowing `url`.
    r = requests.get(url, headers={'User-Agent': ua.chrome}, verify=False)
    # requests decodes JSON directly; no need for json.loads(r.text).
    payload = r.json()
    return payload['data']['data']


# Build requests-style proxy dicts from the raw proxy records.
def run_arr():
    """Return a list of proxies mappings usable as requests' `proxies=` argument.

    Returns:
        list[dict]: one {'http': 'http://ip:port'} dict per proxy record.
    """
    # Build a fresh dict per entry — no shared mutable dict, no deepcopy needed.
    return [
        {'http': 'http://%s:%s' % (item['ip'], item['port'])}
        for item in proxy_arr()
    ]


if __name__ == '__main__':
    while num != -1:
        blog_list = TitleUrl()  # list of blog article URLs
        run_proxy_list = run_arr()  # list of proxies dicts
        for x in run_proxy_list:
            for i in blog_list:
                try:
                    r = requests.get(i, headers={'User-Agent': ua.chrome}, proxies=x, timeout=20, verify=False)
                except exceptions.Timeout:
                    print('error：请求超时')
                # BUG FIX: was `expression.ConnectionError` — `expression` is
                # undefined, so any connection error raised a NameError.
                except exceptions.ConnectionError:
                    print('ConnectionError错误，超时错误！')
                else:
                    if r.status_code == 200:
                        num += 1
                        file = '访问成功，第%s次' % num
                        # `with` guarantees the file handle is closed even on error.
                        with open('conf.log', 'w') as fo:
                            fo.write(file + "\n")
                        time.sleep(1)  # pause 1 second between successful hits
                    else:
                        file = '访问失败：%s' % r.status_code
                        with open('openURL_error.txt', 'w+') as fo:
                            fo.write(file + "\n")
        num1 += 1
        print('第%s轮' % str(num1))
        time.sleep(10)
