#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2024-01-26 9:08
# @Author : loneliness_burial
# @File : ip代理池.py
# @Software: PyCharm
# 深情不改必坠死海

import time
import pandas as pd
import requests  # 导入模块

requests.packages.urllib3.disable_warnings()  # Suppress InsecureRequestWarning: the proxy check below uses verify=False (skips SSL certificate validation), which would otherwise spam a warning per request.


def send_request(pages=7):
    """Scrape free HTTP proxies from kuaidaili.com and liveness-check them.

    Side effects (module-level globals, defined in ``__main__``):
      - appends every scraped proxy dict ({'http': 'http://ip:port'}) to
        ``all_ip_list``;
      - appends each proxy that successfully fetches baidu.com (HTTP 200)
        to ``usable_ip_list``.

    Args:
        pages: number of listing pages to scrape (default 7, matching the
            original hard-coded ``range(1, 8)``).
    """
    header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'zh-CN,zh;q=0.9,ja-JP;q=0.8,ja;q=0.7',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Cookie': 'channelid=0; sid=1706229943702651; _ga=GA1.2.2074796018.1706229945; _gid=GA1.2.51053211.1706229946',
        'DNT': '1',
        'Pragma': 'no-cache',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'none',
        'Sec-Fetch-User': '?1',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36',
        'sec-ch-ua': '"Not A(Brand";v="99", "Google Chrome";v="121", "Chromium";v="121"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
    }
    for page in range(1, pages + 1):
        print(f'正在抓取第{page}页……')
        # timeout added: the original listing-page fetch had none and could
        # hang the whole script on a stalled connection.
        response = requests.get(url=f'https://www.kuaidaili.com/free/inha/{page}/',
                                headers=header, timeout=10)
        df = pd.read_html(response.text)[0]  # first HTML table = the proxy list
        time.sleep(1)  # be polite; rapid requests make the site stop serving the table

        # Collect this page's proxies separately. BUG FIX: the original
        # iterated ``all_ip_list`` here, so every earlier page's proxies were
        # re-tested on each new page and appended to ``usable_ip_list``
        # repeatedly (O(n^2) network calls + duplicate entries).
        page_proxies = []
        for _, row in df.iterrows():
            proxy = {
                'http': 'http://' + f"{row['IP']}:{row['PORT']}"
            }
            all_ip_list.append(proxy)
            page_proxies.append(proxy)

        for proxy_item in page_proxies:
            try:
                # verify=False: free proxies rarely have valid certs;
                # warnings are suppressed at module import time.
                response = requests.get(url='https://www.baidu.com', headers=header, proxies=proxy_item,
                                        timeout=5, verify=False)  # give each proxy at most 5s to respond

                if response.status_code == 200:  # 200 means the proxy relayed the request successfully
                    usable_ip_list.append(proxy_item)
                    print(proxy_item, '\033[32m可用\033[0m')
                else:
                    print(proxy_item, '不可用')
            except Exception as e:
                # Dead/slow proxies raise ConnectionError/Timeout; report and keep going.
                print(proxy_item, '\033[031m请求异常\033[0m', e)


if __name__ == '__main__':
    all_ip_list = []  # every proxy scraped from the site (may contain duplicates)
    usable_ip_list = []  # proxies that passed the liveness check in send_request()
    send_request()

    ip_csv_path = 'D:/python-project/py4.requests模块高级/ip_pool.csv'  # NOTE(review): hard-coded Windows path — adjust per machine
    # Guard against an empty result: drop_duplicates(subset='http') raises
    # KeyError on an empty DataFrame (no 'http' column exists).
    if usable_ip_list:
        usable_df = pd.DataFrame(data=usable_ip_list)
        new_df = usable_df.drop_duplicates(subset='http', keep='first', inplace=False)  # drop duplicate proxies
        # index=False: the original comment said the index was removed, but the
        # flag was never passed, so a redundant row-number column was written.
        new_df.to_csv(ip_csv_path, mode='w', encoding='utf-8', index=False)
        print(f"保存成功\n{ip_csv_path}")
    print('抓取完成！')
    print(f'抓取到的ip个数为：{len(all_ip_list)}')
    print(f'可以使用的ip个数为：{len(usable_ip_list)}')