import requests
from datetime import datetime
# import urllib.request as ur
#
# kw = input('我想搜索:')
#
# myUrl = 'https://www.baidu.com/s?wd='
# myUrl += ur.quote(kw)   # quote把本地字节转换成网络字节，把转换过的字节增加到搜索的url上，就实现自定义搜索了
#                         # quote方法在urllib.request库里
#
# myHeader = {
#     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
# }
#
# resp = requests.get(myUrl, headers=myHeader)
# resp.encoding = 'utf-8'
# with open('bd_search.html', 'wb') as file:
#     file.write(resp.content)

# Base endpoint for Baidu web search; the query string is appended by requests.
myUrl = 'https://www.baidu.com/s?'

# Browser-like User-Agent so the request is not rejected as an obvious bot.
myHeader = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'}

# Ask the user for a keyword and map it to Baidu's 'wd' query parameter.
kw = input('我想搜索:')
data = {'wd': kw}


def get_free_proxy():
    """Scan a list of free-proxy APIs and return the first working proxy.

    Each service is assumed to return a plain-text ``ip:port`` body.
    Every candidate proxy is verified with a live request before being
    accepted.

    Returns:
        dict | None: a requests-style ``proxies`` mapping with ``http`` and
        ``https`` entries, or ``None`` when no working proxy was found.
    """
    # Free proxy-list API endpoints (each returns one random proxy as text).
    proxy_services = [
        'http://api.proxybay.com/random-proxy',
        'http://www.proxy360.cn/Api/randIp',
        # Add more proxy service APIs here.
    ]

    test_url = 'http://httpbin.org/ip'  # echo service used to verify a proxy

    for service in proxy_services:
        try:
            # timeout keeps a dead service from hanging the scan forever.
            response = requests.get(service, timeout=5)
            if response.status_code != 200:
                continue
            # Assume the response body is formatted as 'ip:port'.
            ip_port = response.text.strip()
            proxies = {
                'http': 'http://' + ip_port,
                'https': 'https://' + ip_port,
            }
            # Probe the candidate with a real request before trusting it;
            # timeout bounds the wait on an unresponsive proxy.
            test_response = requests.get(test_url, proxies=proxies, timeout=5)
            if test_response.status_code == 200:
                print(f'Found available proxy: {ip_port}')
                return proxies
        except requests.exceptions.RequestException:
            # Best effort: a broken service/proxy just means trying the next.
            pass

    return None


print(datetime.now())
myProxies = get_free_proxy()
print(datetime.now())         # scanning for a usable free proxy takes ~20 seconds

# A session carries cookies from the previous response into the next request,
# so there is no need to copy cookies from the browser by hand (cookies expire).
sess = requests.session()

# With requests, query parameters are passed via params= instead of being
# concatenated onto the URL as with urllib, which is more convenient:
# params= is for GET requests, data= is for POST requests.
# proxies=None (no working proxy found) simply means a direct connection.
# timeout bounds how long the request may hang on a slow proxy/server.
resp = sess.get(myUrl, headers=myHeader, params=data, proxies=myProxies,
                timeout=10)

# Write the raw response bytes. Note: resp.encoding only affects resp.text,
# so setting it before writing resp.content would be a no-op.
with open('bd_search.html', 'wb') as file:
    file.write(resp.content)
