from bs4 import BeautifulSoup
import requests
from lxml import etree
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Browser configuration: Chinese locale, spoofed desktop user agent,
# and headless mode so the scraper runs without a visible window.
chrome_opts = webdriver.ChromeOptions()
chrome_opts.add_argument('lang=zh_CN.UTF-8')  # force Chinese locale
chrome_opts.add_argument('user-agent="Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4882.400 QQBrowser/9.7.13059.400"')  # spoof a desktop browser UA
chrome_opts.add_argument('--headless')  # run the browser in the background
# Shared browser instance used by the daili class below.
driver = webdriver.Chrome(options=chrome_opts)

class daili:
    """Scrape free HTTP proxies from kuaidaili.com, validate them,
    and save the working ones to ``IP.txt``.

    Relies on the module-level Selenium ``driver`` to render the
    JavaScript-built proxy table.
    """

    # 1. Send request, get response
    def send_request(self, page):
        """Load one free-proxy listing page in the browser.

        Args:
            page: 1-based page number of the listing.

        Returns:
            The fully rendered page source as an HTML string.
        """
        print("=============正在抓取第{}页===========".format(page))
        url = 'https://www.kuaidaili.com/free/inha/{}/'.format(page)
        driver.get(url)
        # Wait for the JS-rendered proxy table before grabbing the source;
        # without this the table body would still be empty.
        wait = WebDriverWait(driver, 20)
        wait.until(EC.presence_of_element_located((By.ID, 'table__free-proxy')))
        data = driver.page_source
        # Brief extra pause so late-arriving rows have a chance to render.
        time.sleep(1)
        return data

    # 2. Parse data
    def parse_data(self, data):
        """Extract the proxy-table rows from a rendered listing page.

        Args:
            data: HTML of a listing page (output of ``send_request``).

        Returns:
            List of ``<tr>`` Tag objects, one per proxy entry.
        """
        soup = BeautifulSoup(data, 'lxml')
        return soup.find('tbody', class_="kdl-table-tbody").find_all('tr')

    # 4. Validate proxy IPs
    def check_ip(self, proxies_list):
        """Keep only the proxies that can actually reach baidu.com.

        Args:
            proxies_list: List of ``{scheme: "ip:port"}`` dicts as
                accepted by ``requests``' ``proxies`` argument.

        Returns:
            The subset of ``proxies_list`` that answered with HTTP 200.
        """
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36'}

        can_use = []
        for proxies in proxies_list:
            try:
                # timeout=0.1 is deliberately strict: only very fast
                # proxies survive the check.
                response = requests.get('https://www.baidu.com/',
                                        headers=headers,
                                        proxies=proxies,
                                        timeout=0.1)
                if response.status_code == 200:
                    can_use.append(proxies)
            except Exception as e:
                # Dead or slow proxies raise (timeout / connection error);
                # log and keep scanning rather than abort the whole run.
                print(e)

        return can_use

    # 5. Save to file
    def save(self, can_use):
        """Write the usable proxies to ``IP.txt`` (one dict per line)
        and shut down the shared browser.

        Args:
            can_use: Validated proxy dicts from ``check_ip``.
        """
        # ``with`` guarantees the file handle is closed even on error;
        # utf-8 avoids mojibake should any non-ASCII text slip in.
        with open('IP.txt', 'w', encoding='utf-8') as file:
            for proxy in can_use:
                file.write(str(proxy) + '\n')
        # Close the browser — the scrape is finished at this point.
        driver.quit()

    # Main workflow
    def run(self, num):
        """Scrape listing pages until at least ``num`` port-80 proxies
        are collected, validate them, and save the working ones.

        Args:
            num: Minimum number of candidate proxies to collect before
                validation starts.
        """
        proxies_list = []
        page = 1
        while True:
            data = self.send_request(page)
            parse_list = self.parse_data(data)
            # 3. Extract data from each table row
            for tr in parse_list:
                tds = tr.find_all('td')
                # Read cell text via get_text() instead of joining the
                # Tag's children — robust against nested markup.
                port_num = tds[1].get_text(strip=True)
                # Only keep proxies listening on port 80.
                if port_num != '80':
                    continue
                # Lowercase the scheme ("HTTP" -> "http"): requests
                # matches proxy-dict keys against the lowercase URL
                # scheme, so an uppercase key would silently be ignored.
                http_type = tds[3].get_text(strip=True).lower()
                ip_num = tds[0].get_text(strip=True)
                proxies_list.append({http_type: ip_num + ":" + port_num})
            if len(proxies_list) >= num:
                break
            page += 1

        print("获取到的代理IP数量：", len(proxies_list))

        can_use = self.check_ip(proxies_list)

        print("能用的代理IP数量：", len(can_use))
        print("能用的代理IP:", can_use)

        self.save(can_use)

# Script entry point: collect at least 20 candidate proxies,
# validate them, and write the survivors to IP.txt.
if __name__ == "__main__":
    crawler = daili()
    crawler.run(20)
