import csv
import time

import requests
from bs4 import BeautifulSoup
import logging

# Template URL for the free-proxy listing (stype=3, first page).
# NOTE(review): the crawl loop below builds its own per-page URL, so this
# constant appears unused — confirm before removing.
base_url = 'http://www.ip3366.net/free/?stype=3&page=1'


# Accumulates [ip, port] pairs scraped across all pages.
data = []
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Browser-like headers; the site may reject requests without a User-Agent.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/136.0.0.0 Safari/537.36 Edg/136.0.0.0',
           'Cookie': 'id58=CroEA2gfFxdu202cL/O8Ag==; expires=Mon, 10-May-27 09:06:31 GMT; domain=58che.com; path=/'}
def get_soup(url, headers, timeout=10):
    """Fetch *url* and return a parsed BeautifulSoup document.

    Args:
        url: Page URL to request.
        headers: HTTP headers dict (User-Agent, Cookie, ...).
        timeout: Seconds before the request is aborted (default 10);
            without it a stalled connection would hang the whole crawl.

    Returns:
        A BeautifulSoup on success, or None on a non-200 status or any
        request failure (errors are logged, never raised to the caller).
    """
    try:
        response = requests.get(url, headers=headers, timeout=timeout)
        if response.status_code != 200:
            logging.error(f'请求失败，状态码：{response.status_code}')
            return None
        # Force utf-8 and parse the decoded text. The original parsed
        # response.content (raw bytes), which silently ignored this
        # encoding override.
        response.encoding = 'utf-8'
        return BeautifulSoup(response.text, 'html.parser')
    except requests.RequestException as e:
        # Narrowed from a blanket Exception: only network/HTTP errors
        # are expected here, and they are logged and swallowed.
        logging.error(e)
        return None


def get_data(soup):
    """Extract (IP, port) pairs from the page's proxy table.

    Appends each row as ``[ip, port]`` to the module-level ``data`` list.

    Args:
        soup: Parsed BeautifulSoup document of one listing page.

    Returns:
        The shared ``data`` list, or None when the page has no <tbody>.
    """
    logging.info("查找ip")
    body = soup.find('tbody')
    if not body:
        logging.info('body is empty')
        return
    # Snapshot the length so the log below reports this page's rows,
    # not the cumulative total (the original logged len(data), which
    # grows across pages and overstated the per-page count).
    before = len(data)
    for row in body.find_all('tr'):
        cells = row.find_all('td')
        if len(cells) < 2:
            # Skip header/malformed rows that lack ip+port cells.
            continue
        ip = cells[0].text.strip()
        port = cells[1].text.strip()
        data.append([ip, port])
    logging.info(f'爬取到{len(data) - before}条ip')
    return data




# Crawl pages 1..7 of the free-proxy listing, stopping early if a fetch fails.
for i in range(1, 8):
    url = f'http://www.ip3366.net/free/?stype=3&page={i}'
    # Bug fix: the original passed url + str(i), which appended the page
    # number to a URL already ending in page={i} (requesting page=11,
    # 22, 33, ... instead of 1..7). Pass the URL as built.
    soup = get_soup(url, headers)
    if not soup:
        logging.info(f'爬取结束')
        break
    get_data(soup)
    logging.info(f'第{i}页爬取完成')
logging.info(f'共爬取到{len(data)}条ip')

if data:
    # Append results so repeated runs accumulate proxies in one file.
    with open('proxy_data.csv', 'a', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        # Write the header only when the file is new/empty; the original
        # rewrote it on every run, producing duplicate header rows.
        if csvfile.tell() == 0:
            # Two columns to match the [ip, port] rows (the original
            # header had a spurious third column '其他信息').
            writer.writerow(['IP', 'Port'])
        writer.writerows(data)
    # Bug fix: the original log named 'proxy_data_all.csv' but the file
    # actually written is 'proxy_data.csv'.
    logging.info("数据已保存到 proxy_data.csv")