# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 10:31:55 2020

@author: Administrator
"""


from fake_useragent import UserAgent
 
# Demo of the fake_useragent library: each attribute returns a realistic
# User-Agent string for that browser family, fetched from a public pool.
ua = UserAgent()
# Internet Explorer User-Agent
print(ua.ie)
 
# Opera browser
print(ua.opera)
 
# Chrome browser
print(ua.chrome)
 
# Firefox browser
print(ua.firefox)
 
# Safari browser
print(ua.safari)
 
# Most common usage for crawlers: headers should vary randomly between
# requests; `ua.random` returns a different random User-Agent each call.
print(ua.random)
print(ua.random)
print(ua.random)

import urllib.request
import random

# Candidate HTTP proxies as "ip:port" strings.  These are hard-coded
# examples from the original tutorial — almost certainly dead by now;
# replace with live proxies before running.
ip=["122.72.56.112:8080","118.144.67.50:8118"]
url1 = "http://www.whatismyip.com.tw"
# Route plain-http traffic through a randomly chosen proxy.
proxy_support = urllib.request.ProxyHandler({"http":random.choice(ip)})
opener = urllib.request.build_opener(proxy_support)
# Spoof a desktop Chrome User-Agent so the target site is less likely to
# reject the request as a bot.
opener.addheaders=[("User-Agent",'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36')]
# install_opener makes this opener the process-wide default for
# urllib.request.urlopen; the explicit opener.open() below would work
# without it, so this line mainly benefits any later urlopen calls.
urllib.request.install_opener(opener)
response=opener.open(url1)
# Page body; assumes the site serves UTF-8 — TODO confirm, a mismatched
# charset would raise UnicodeDecodeError here.
url = response.read().decode("utf-8")
print(url)

#代理ip网站
#有代理：https://www.youdaili.net/Daili/guonei/
#66代理：http://www.66ip.cn/6.html
#西刺代理：https://www.xicidaili.com/
#快代理：https://www.kuaidaili.com/free/
#根据网页结果，使用正则表达式匹配
#这种方法适合翻页的网页
# NOTE(review): the triple-quoted string below is an earlier draft of the
# get_ip/valVer pair that follows, "commented out" by wrapping it in a
# module-level string literal.  It is a no-op at runtime and is kept only
# for reference; consider deleting it.
'''
import requests
import random
import re
import time
 
 
 
def get_ip():
    url='https://www.kuaidaili.com/free/inha/'
    url_list=[url+str(i+1) for i in range(1)]
    print(url_list)
    ip_list = []
    for i in range(len(url_list)):
        url =url_list[i]
        html = requests.get(url=url,).text
        regip = '<td.*?>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>.*?<td.*?>(\d{1,5})</td>'
        matcher = re.compile(regip,re.S)
        ipstr = re.findall(matcher, html)
        time.sleep(1)
 
        for j in ipstr:
            ip_list.append(j[0]+':'+j[1])
    print(ip_list)
    print('共收集到%d个代理ip' % len(ip_list))
    return ip_list
def valVer(proxys):
    badNum = 0
    goodNum = 0
    good=[]
    for proxy in proxys:
        print("现在正在检测ip",proxy)
        try:
            requests.get('http://wenshu.court.gov.cn/', proxies={"http":"http://"+str(proxy)}, timeout=2)
        except:
            badNum+=1
            print('connect failed')
        else:
            goodNum=1
            good.append(proxy)
            print('success')
 
    print ('success proxy num : ', goodNum)
    print( 'bad proxy num : ', badNum)
    print(good)
 
if __name__ == '__main__':
    ip_list=get_ip()
    valVer(ip_list)
'''
import requests
import random
import re
import time
 
 
 
def get_ip():
    """Scrape candidate proxy addresses from the kuaidaili free list.

    Fetches pages 1-10 of https://www.kuaidaili.com/free/inha/ and pulls
    "ip:port" pairs out of the HTML table with a regex, sleeping 1s
    between pages to be polite.

    Returns:
        list[str]: collected proxies as "ip:port" strings; may be empty
        if every page failed or the site layout changed.
    """
    base = 'https://www.kuaidaili.com/free/inha/'
    url_list = [base + str(page) for page in range(1, 11)]
    print(url_list)
    # Compile once, outside the loop.  Raw string: un-escaped '\d' in a
    # plain string is a SyntaxWarning (and eventually an error) on
    # modern Python.
    ip_pattern = re.compile(
        r'<td.*?>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>.*?<td.*?>(\d{1,5})</td>',
        re.S)
    ip_list = []
    for url in url_list:
        try:
            # timeout: without one, requests.get can hang indefinitely.
            html = requests.get(url=url, timeout=10).text
        except Exception as e:
            # Best-effort scrape: skip a failing page instead of
            # aborting the whole collection run.
            print('failed to fetch', url, ':', e)
            continue
        for host, port in ip_pattern.findall(html):
            ip_list.append(host + ':' + port)
        time.sleep(1)  # throttle between page requests
    print('共收集到%d个代理ip' % len(ip_list))
    print(ip_list)
    return ip_list
def valVer(proxys):
    """Validate proxies by fetching http://www.baidu.com through each one.

    Args:
        proxys: iterable of "ip:port" strings (as produced by get_ip()).

    Returns:
        list[dict]: the requests-style ``proxies`` dicts that answered
        with HTTP 200 within the timeout.  (The original printed this
        list but returned None; returning it is backward-compatible and
        lets callers reuse the working proxies.)
    """
    badNum = 0
    goodNum = 0
    good = []
    for proxy in proxys:
        # Scraped entries are bare "ip:port" and never contain a scheme,
        # so the old `'https' in proxy` check always picked 'http'.
        # Keep plain-http testing, but give the proxy URL an explicit
        # http:// scheme — recent requests/urllib3 versions reject
        # scheme-less proxy URLs.
        proxies = {'http': 'http://' + proxy}
        print('现在正在测试的IP：', proxies)
        try:
            response = requests.get('http://www.baidu.com',
                                    proxies=proxies, timeout=2)
        except Exception as e:
            print(e)
            badNum += 1
            continue
        if response.status_code == 200:
            goodNum += 1
            good.append(proxies)
            print(proxy, 'success proxy')
        else:
            badNum += 1
            print(proxy, 'bad proxy')
    print('success proxy num : ', goodNum)
    print('bad proxy num : ', badNum)
    print(good)
    return good
 
if __name__ == '__main__':
    # Script entry point: scrape candidate proxies, then validate them.
    valVer(get_ip())




