# -*- coding: utf-8 -*-
import ast

import requests
from bs4 import BeautifulSoup as BS
def get_web_ips(num, crawl_url):
    """Scrape proxy addresses from xicidaili.com and return up to `num` working ones.

    Args:
        num: number of verified proxies to collect before returning early.
        crawl_url: URL fetched through each candidate proxy to test it.

    Returns:
        List of dicts shaped {'ip': 'scheme://host:port'} for proxies that
        answered `crawl_url` with HTTP 200.  May be shorter than `num` if
        the source pages (1-9) are exhausted first.
    """
    page_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:43.0)Gecko/20100101 Firefox/43.0'}
    test_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko'}
    counter = 0
    useful_ips = []
    for page in range(1, 10):
        # Scrape one listing page of the proxy site.
        url = 'http://www.xicidaili.com/nn/' + str(page)
        req = requests.get(url, headers=page_headers, timeout=10)
        req.encoding = req.apparent_encoding
        bs = BS(req.content, 'html.parser')
        trs = bs.find_all('tr')

        candidates = []
        for tr in trs[1:]:  # trs[0] is the table header row
            tds = tr.find_all('td')
            # Column 5 holds the protocol ('HTTP'/'HTTPS'); columns 1/2 hold host/port.
            scheme = str(tds[5].text).lower()
            ip_str = scheme + '://' + tds[1].text + ':' + tds[2].text
            candidates.append({'ip': ip_str})

        # Test whether each candidate proxy actually works.
        for candidate in candidates:
            try:
                # BUG FIX: requests expects proxies keyed by scheme
                # ({'http': ..., 'https': ...}); the old {'ip': ...} dict was
                # silently ignored, so every request went direct and each
                # proxy looked "working".  Also add a timeout so a dead
                # proxy cannot hang the scan forever.
                req = requests.get(crawl_url,
                                   headers=test_headers,
                                   proxies={'http': candidate['ip'],
                                            'https': candidate['ip']},
                                   timeout=10)
                if req.status_code == 200:
                    counter += 1
                    useful_ips.append(candidate)
                    print('get ', counter)
                    if counter == num:
                        return useful_ips
            except Exception:
                print('unuseful')
    # Pages exhausted before reaching `num`: return what was found
    # instead of implicitly returning None (which crashed callers).
    return useful_ips

def get_txt_ips(file_name):
    """Load saved proxy dicts from a text file, one Python-literal dict per line.

    Each line is expected to look like ``{'ip': 'http://host:port'}`` as
    written by the ``__main__`` section of this script.

    Args:
        file_name: path to the file of saved proxies.

    Returns:
        List of the parsed dicts, in file order.
    """
    # BUG FIX: the original mixed space- and tab-indentation in this
    # function, which is a TabError under Python 3.
    with open(file_name, 'r') as f:
        lines = [line.strip() for line in f]
    # SECURITY FIX: ast.literal_eval only parses Python literals, unlike
    # eval(), which would execute arbitrary code found in the file.
    return [ast.literal_eval(line) for line in lines if line]

if __name__ == '__main__':
    crawl_url = 'http://www.baidu.com'  # URL used to verify each proxy
    # BUG FIX: get_web_ips can return None when it finds fewer than the
    # requested number of proxies; guard so the write loop below never
    # iterates over None (TypeError).
    ips = get_web_ips(100, crawl_url) or []
    with open('fake_ips_100.txt', 'w') as f:
        for entry in ips:
            f.write(str(entry) + '\n')
