import random
from urllib.parse import parse_qs, urlparse

import requests
from fake_useragent import UserAgent
from lxml import etree

import proxyandua

ua = UserAgent().random

header = {"User-Agent":ua}

proxy = {"http":"http://"+random.choice(proxyandua.proxy)}

lis = []

def get(url):
	print("开始爬取")

	one = requests.get(url,headers=header).content.decode("gb2312")
	two = etree.HTML(one).xpath("//*[@id='list']/table/tbody/tr")

	for three in two:
		four = three.xpath(".//td/text()")
		lis.append(four[0]+":"+four[1])
		print("存储IP-->"+four[0]+":"+four[1])

	next_url = ""
	page_list = etree.HTML(one).xpath("//*[@id='listnav']/ul/a")
	for five in page_list:
		if five.xpath("./text()")[0] == "下一页":
			next_url = five.xpath("./@href")[0]
	if next_url:
		print("进入下一页：第"+next_url[-1:]+"页")
		get("http://www.ip3366.net/free/"+next_url)


get("http://www.ip3366.net/free/?stype=1&page=1")

for i in lis:
	print("'"+i+"',")



# TODO(review): dead code — either re-enable proxy validation via proxyandua
# or delete these lines once confirmed unnecessary:
# proxyandua.get(lis)
# print(proxyandua.pox)

