import requests
from bs4 import BeautifulSoup
import re
import time
# Module-level accumulators shared by process_data(); they grow across pages
# and stay parallel to each other (same index = same proxy).
ip = []    # proxy addresses, e.g. '1.2.3.4'
port = []  # port numbers as strings, parallel to `ip`
type = []  # protocol names ('HTTP'/'HTTPS'); NOTE(review): shadows the builtin `type`
def get_url(page):
    """Scrape `page` pages of the kuaidaili free-proxy listing.

    For every page: fetch it with get_content(), feed the extracted
    fields to process_data(), then sleep 3s to stay under the site's
    rate limit. A failure on one page is logged and the loop continues.
    """
    total = int(page)
    for page_no in range(1, total + 1):
        try:
            print('正在爬取第%d页' % page_no)
            url = 'https://www.kuaidaili.com/free/inha/{}/'.format(page_no)
            print("爬取网址为：", url)
            addrs, ports, kinds = get_content(url)
            process_data(addrs, ports, kinds)
            print('休息一下')
            # Throttle so the site doesn't ban us for hammering it.
            time.sleep(3)
        except Exception as err:
            # Per-page boundary: log and move on to the next page.
            print('爬取失败', err)
def get_content(url):
    """Fetch one listing page and extract proxy fields from its <td> cells.

    Returns three parallel lists (IP, PORT, TYPE); each element is a
    single-item list so existing callers can keep indexing with ``[0]``.
    On a non-200 response returns three empty lists instead of the old
    implicit ``None``, which crashed the caller's tuple unpack.

    BUG FIX: the old port pattern ``\\d{4}`` dropped 2/3-digit ports and
    truncated 5-digit ones, silently desynchronizing the three lists so
    IPs got paired with the wrong ports downstream. Cells are now selected
    by their ``data-title`` attribute and read with get_text(), which is
    robust against markup changes.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        print('连接失败或遭遇反爬')
        return [], [], []
    print('连接正常')
    soup = BeautifulSoup(response.text, 'lxml')
    IP = []
    PORT = []
    TYPE = []
    for cell in soup.find_all('td'):
        title = cell.get('data-title', '')
        text = cell.get_text(strip=True)
        if title == 'IP' or re.findall(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+', text):
            IP.append([text])
        elif title == 'PORT':
            # Any run of digits — ports are 1-5 digits, not always 4.
            PORT.append(re.findall(r'\d+', text))
        elif title == '类型':
            TYPE.append([text])
    return IP, PORT, TYPE
def process_data(IP, PORT, TYPE):
    """Fold newly scraped fields into the module-level lists, rebuild the
    full proxy list, validate it with check_ip(), and persist the survivors.

    NOTE(review): every call rebuilds and re-checks ALL proxies seen so
    far, not just this page's — this pairs with save_ip() overwriting the
    file each time, so the two behaviours must change together if at all.
    """
    ip.extend(entry[0] for entry in IP)
    port.extend(entry[0] for entry in PORT)
    type.extend(entry[0] for entry in TYPE)
    reg = []
    for idx in range(len(ip)):
        # One dict per proxy: {protocol: 'address:port'}.
        reg.append({type[idx]: ip[idx] + ':' + port[idx]})
    can_use = check_ip(reg)
    print('有用的ip数量为：', len(can_use))
    save_ip(can_use)
def check_ip(reg):
    """Probe each proxy in `reg` against baidu.com; return the usable ones.

    `reg` is a list of single-entry dicts shaped {protocol: 'ip:port'},
    e.g. {'HTTP': '1.2.3.4:8080'}. Entries whose probe returns HTTP 200
    within 1 second are kept (in their original form).

    BUG FIXES:
    - headers was passed positionally, which binds to requests.get's
      `params` parameter — the User-Agent header was never sent.
    - requests expects proxies as {'http': 'http://ip:port'} (lowercase
      scheme key, full URL); the old uppercase-key/bare-address dicts were
      silently ignored, so every proxy "worked".
    - A single timing-out proxy raised and aborted the whole batch; dead
      proxies are now skipped.
    """
    url = 'https://www.baidu.com/'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}
    can_use = []
    for entry in reg:
        proxies = {proto.lower(): '{}://{}'.format(proto.lower(), addr)
                   for proto, addr in entry.items()}
        try:
            response = requests.get(url, headers=headers, proxies=proxies, timeout=1)
            if response.status_code == 200:
                can_use.append(entry)
        except Exception:
            # Dead or slow proxy — skip it instead of aborting the batch.
            continue
    return can_use
def save_ip(data):
    """Overwrite ip.txt with one proxy mapping (str(dict)) per line.

    FIXES: removed the redundant f.close() inside the with-block (the
    context manager already closes the file); 'w' replaces 'w+' since the
    read capability was never used; encoding is pinned to UTF-8 instead of
    depending on the platform default.
    """
    with open('ip.txt', 'w', encoding='utf-8') as f:
        for item in data:
            f.write(str(item) + '\n')
if __name__ == '__main__':
    # Page count is kept as a string here; get_url() converts it with
    # int(), so non-numeric input raises ValueError there (uncaught).
    page = input('爬取页数：')
    get_url(page)
    print('爬取完成')