import random
import time
import requests
from bs4 import BeautifulSoup
import chardet
from io import BytesIO
import gzip

# Baidu search URL: query is "HUAWEI NOVA 3 launch date / end-of-sale date" (URL-encoded)
search_url = "https://www.baidu.com/s?wd=HUAWEI%20NOVA%203%20上市时间%20停售时间"

# Pool of desktop User-Agent strings; one is chosen at random per run to
# make repeated requests look less uniform.
user_agents = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36"
]

# Proxy endpoints (placeholders — fill in a real ip:port before use).
# NOTE: the original script defined this dict twice with identical contents;
# it is now declared exactly once. It is currently NOT passed to requests.get
# below — pass proxies=proxies there to actually route through the proxy.
proxies = {
    "http": "http://your_proxy_ip:port",
    "https": "https://your_proxy_ip:port"
}

# Browser cookies (can be copied from the browser developer tools)
cookies = {
    "BAIDUID": "BF2F984E388D66E17A6965EFD18A45E1:SL=0:NR=10:FG=1",  # BAIDUID taken from the browser
    "BDUSS": "UpXbDNleEMyelcyRWZaa3M1eWZlSE42Z1o5fnVRbkl3Yy1iNWRENUlsbmNBbEJtSVFBQUFBJCQAAAAAAAAAAAEAAAA9vFF~18~Jq7Wlt-cAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANx1KGbcdShma",   # BDUSS taken from the browser
    "H_PS_PSSID": "60279_61027_61216_60853_61492_61506_61520_61529_61612_61633_61639_61720",  # H_PS_PSSID taken from the browser
    # add any other cookie items captured from the browser here
}

# Request headers; the User-Agent is picked at random from the pool above
headers = {
    "User-Agent": random.choice(user_agents),  # randomly selected User-Agent
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Connection": "keep-alive",
    "Referer": "https://www.baidu.com/",
    "Cache-Control": "max-age=0",
    "Upgrade-Insecure-Requests": "1"
}

# Fetch the search results page.
# NOTE: proxies is defined above but deliberately not passed here; add
# proxies=proxies if proxy routing is actually wanted.
response = requests.get(search_url, headers=headers, timeout=10, cookies=cookies)

# Default to an empty string so the anti-crawl check below never hits a
# NameError when the response body is empty (the original only assigned
# html_content inside the else branch).
html_content = ""

# Check whether the response body is empty
if not response.content:
    print("响应内容为空")
else:
    if response.headers.get('Content-Encoding') == 'gzip':
        # requests normally decompresses gzip transparently, in which case
        # response.content is already plain HTML and GzipFile would raise.
        # Try manual decompression for the rare raw-gzip case, but fall back
        # to the library's own decoding instead of crashing.
        try:
            buf = BytesIO(response.content)
            f = gzip.GzipFile(fileobj=buf)
            html_content = f.read().decode('utf-8')  # assume UTF-8 after decompression
        except OSError:
            html_content = response.text
    else:
        # Auto-detect the character encoding from the raw bytes
        detected_encoding = chardet.detect(response.content)['encoding']
        if detected_encoding is None:
            print("无法检测编码，尝试使用 GBK 作为默认编码")
            response.encoding = 'gbk'  # fall back to GBK, common for Chinese pages
        else:
            response.encoding = detected_encoding

        html_content = response.text  # decoded text using the chosen encoding

# Detect Baidu's anti-scraping interstitial before attempting to parse
if any(marker in html_content for marker in ("百度安全验证", "网络不给力")):
    print("触发反爬，请更换策略。")
else:
    # Pause for a random 1–3 s interval to mimic human browsing
    time.sleep(random.uniform(1, 3))

    # Parse the HTML with the stdlib-backed parser
    soup = BeautifulSoup(html_content, "html.parser")

    # Dump the pretty-printed document for inspection
    print(soup.prettify())

    # Print the text of each search-result container. The "result" class is
    # an assumption about Baidu's markup — adjust the selector if the page
    # structure differs (e.g. when looking for launch/end-of-sale dates).
    for result_div in soup.find_all("div", class_="result"):
        print(result_div.text)