'''
Fetch all proxy IP addresses from the first two listing pages of xicidaili.com.
'''
import socket
import urllib.error
import urllib.request

from bs4 import BeautifulSoup
headers={'User-Agent':'Mozilla/5.0(Windows NT 10.0;WOW64;rv:50.0) Gecko/20100101 Firefox/50.0'}

def getProxyIP():
    """Scrape proxy addresses from the first two listing pages of xicidaili.com.

    Also writes the collected proxies, one per line, to 'proxyIP.txt' in the
    current working directory.

    Returns:
        list[str]: proxies formatted as 'http://<ip>:<port>'.
    """
    proxy = []
    # Pages 1 and 2.  The original used range(1, 2), which fetched only
    # page 1 and contradicted the module docstring ("first two pages").
    for page in range(1, 3):
        url = 'http://www.xicidaili.com/nn/%d' % page
        print(url)
        try:
            req = urllib.request.Request(url, headers=headers)
            # Context manager ensures the HTTP response is closed.
            with urllib.request.urlopen(req) as res:
                content = res.read().decode('utf-8')
        except (urllib.error.URLError, UnicodeDecodeError):
            # Skip unreachable or garbled pages; don't hide other bugs
            # behind a bare except.
            continue
        soup = BeautifulSoup(content, 'lxml')
        # The first <tr> is the table header; the remaining rows hold proxies.
        for row in soup.find_all('tr')[1:]:
            tds = row.find_all('td')
            try:
                # Column 1 is the IP, column 2 the port -- per the site's
                # table layout assumed by the original code.
                proxy.append('http://' + tds[1].contents[0] + ':' + tds[2].contents[0])
            except IndexError:
                continue  # malformed/short row: skip it
    with open('proxyIP.txt', 'w', encoding='utf-8', errors='ignore') as fip:
        fip.write('\n'.join(proxy))
    return proxy
# def validate(proxy):
#     url='http://ip.chinaz.com/getip.aspx'
#     socket.setdefaulttimeout(3)
#     for i in range(0,len(proxy)):
#         try:
#             ip=proxy[i]
#             proxy_temp={'http':ip}
#             print(ip)
#             res=urllib.request.urlopen(url,proxies=proxy_temp).read()
#             print('--------------------')
#         except:
#             continue

if __name__ == '__main__':
    # Script entry point: scrape the proxies and (as a side effect of
    # getProxyIP) persist them to proxyIP.txt.
    proxy = getProxyIP()


