import requests
from lxml import etree
import time

'''
 首先先运行此文件，更新代理IP，其他爬虫文件可能会用到代理IP
 获取代理IP -> 生成txt文件
 http://www.ip3366.net/free/?stype=2&page=1
'''


class daili:
    """Scrape free proxy IPs from www.ip3366.net, verify them, and save the
    working ones to ``IP.txt`` so other crawler scripts can reuse them.

    Pipeline (see :meth:`run`): fetch listing pages -> parse table rows ->
    extract ip/port/protocol -> probe each proxy -> persist survivors.
    """

    # 1. Send the request and return the response body.
    def send_request(self, page):
        """Fetch one free-proxy listing page and return its decoded HTML.

        Args:
            page: 1-based page number of http://www.ip3366.net/free/.

        Returns:
            The page HTML as ``str``, or ``None`` when the request failed.
        """
        print("=============正在抓取第{}页===========".format(page))
        base_url = 'http://www.ip3366.net/free/?stype=2&page={}'.format(page)

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36'}

        data = None
        try:
            # timeout keeps the scraper from hanging forever on a dead host;
            # the with-block guarantees the connection is released.
            with requests.get(base_url, headers=headers, timeout=10) as response:
                # Try utf-8 first; the site historically served GBK, so fall
                # back to that and drop any undecodable bytes.
                try:
                    data = response.content.decode('utf-8')
                except UnicodeDecodeError:
                    data = response.content.decode('gbk', errors='ignore')
        except Exception as e:
            print(f"请求异常: {e}")
            return None
        finally:
            time.sleep(1)  # polite crawl delay between page requests

        return data

    # 2. Parse the HTML into table rows.
    def parse_data(self, data):
        """Parse listing-page HTML into the proxy-table ``<tr>`` elements.

        Args:
            data: HTML text from :meth:`send_request`, or a falsy value.

        Returns:
            A list of lxml ``<tr>`` elements (empty when ``data`` is falsy
            or the expected table is absent).
        """
        if not data:
            return []

        html_data = etree.HTML(data)
        if html_data is None:
            # etree.HTML returns None for unparseable input
            return []
        parse_list = html_data.xpath('//table[@class="table table-bordered table-striped"]/tbody/tr')

        return parse_list

    # 4. Probe each proxy and keep only the working ones.
    def check_ip(self, proxies_list):
        """Return the subset of proxies that can actually fetch a test page.

        Each entry looks like ``{'http': 'ip:port'}`` or
        ``{'https': 'ip:port'}``. The test URL's scheme must match the
        proxy's key: requests only routes a request through a proxy whose
        key matches the URL scheme, so probing an http-only proxy against
        an https URL would silently make a direct connection and report a
        false positive.

        Args:
            proxies_list: list of single-key proxy dicts.

        Returns:
            List of proxy dicts that answered with HTTP 200 within 2 s.
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36'}

        can_use = []
        for proxies in proxies_list:
            # Choose a test URL whose scheme matches the proxy type.
            scheme = next(iter(proxies), 'http')
            test_url = '{}://www.baidu.com/'.format(scheme)
            try:
                with requests.get(test_url, headers=headers, proxies=proxies, timeout=2) as response:
                    if response.status_code == 200:
                        can_use.append(proxies)
            except Exception as e:
                print(f"代理测试失败: {e}")

        return can_use

    # 5. Persist the verified proxies, one dict repr per line.
    def save(self, can_use):
        """Write each verified proxy dict to ``IP.txt`` (one per line).

        Args:
            can_use: list of proxy dicts from :meth:`check_ip`.
        """
        pyPath = "IP.txt"

        try:
            # explicit encoding so the output is platform-independent
            with open(pyPath, 'w', encoding='utf-8') as file:
                for proxy in can_use:
                    file.write(f"{str(proxy)}\n")
            print(f"成功保存 {len(can_use)} 个有效代理到 {pyPath}")
        except Exception as e:
            print(f"保存文件失败: {e}")

    # Main workflow: scrape -> parse -> extract -> verify -> save.
    def run(self, pages=5):
        """Scrape ``pages`` listing pages, verify the proxies, save results.

        Args:
            pages: number of listing pages to scrape (default 5, matching
                the original hard-coded range).
        """
        proxies_list = []
        for page in range(1, pages + 1):
            data = self.send_request(page)
            if not data:
                continue

            parse_list = self.parse_data(data)
            time.sleep(2)
            # 3. Extract fields from each row (td positions per site layout:
            # td[1]=IP, td[2]=port, td[4]=protocol).
            for tr in parse_list:
                proxies_dict = {}
                ip_num = tr.xpath('./td[1]/text()')
                port_num = tr.xpath('./td[2]/text()')
                http_type = tr.xpath('./td[4]/text()')

                http_type = ' '.join(http_type).strip()
                ip_num = ' '.join(ip_num).strip()
                port_num = ' '.join(port_num).strip()

                if http_type and ip_num and port_num:
                    # requests expects lowercase scheme keys ('http'/'https')
                    http_type = http_type.lower()
                    proxies_dict[http_type] = f"{ip_num}:{port_num}"
                    proxies_list.append(proxies_dict)

        print("获取到的代理IP数量：", len(proxies_list))
        print("获取到的代理IP:", proxies_list)

        if proxies_list:
            can_use = self.check_ip(proxies_list)
            print("能用的代理IP数量：", len(can_use))
            print("能用的代理IP:", can_use)
            self.save(can_use)
        else:
            print("未获取到任何代理IP，请检查网站结构是否变化")


if __name__ == "__main__":
    # Script entry point: build the scraper and run the full pipeline.
    daili().run()