import random
import time
import requests
from bs4 import BeautifulSoup
import chardet
from io import BytesIO
import gzip


def extract_time_from_text(text, keyword):
    """
    Extract the first date-like string from *text*.

    Matches dates such as "2017年4月19日", "2021-05-20" or "2021/5/20".

    Args:
        text: The text to scan. ``None``/empty input is tolerated.
        keyword: Context keyword (e.g. "上市"). Currently UNUSED — kept only
            for interface compatibility. TODO: use it to pick the date
            closest to the keyword when several dates appear in *text*.

    Returns:
        The first matched date string, or ``None`` if no date is found.
    """
    # Local import kept to match the file's original style.
    import re

    # Guard: re.search would raise TypeError on None.
    if not text:
        return None
    match = re.search(r"(\d{4}[-/年]\d{1,2}[-/月]\d{1,2}日?)", text)
    return match.group(1) if match else None


def query_device_info_baidu(device_model):
    """
    Query Baidu web search for a device's launch and discontinuation dates.

    Args:
        device_model: Device model name, e.g. "iPhone 8".

    Returns:
        A ``(listing_time, discontinued_time)`` tuple of date strings as
        extracted by ``extract_time_from_text``; either element is ``None``
        when not found. Returns ``(None, None)`` when the request or
        parsing fails (the error is printed, not raised).
    """
    search_url = f"https://www.baidu.com/s?wd={device_model}+上市时间+停售时间"

    # Rotate between several User-Agents to reduce the chance of the
    # scraper being blocked.
    user_agents = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0",
        "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36"
    ]

    # Browser-like request headers; User-Agent is picked at random per call.
    headers = {
        "User-Agent": random.choice(user_agents),
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Referer": "https://www.baidu.com/",
        "Cache-Control": "max-age=0",
        "Upgrade-Insecure-Requests": "1"
    }

    # Placeholder cookies — copy real values from the browser dev tools,
    # otherwise Baidu is likely to serve a verification page.
    cookies = {
        "BAIDUID": "xxxxxxxxxxxxxxxxxxxxxxxx",  # replace with a real cookie
        "BDUSS": "xxxxxxxxxxxxxxxxxxxxxxxx",  # replace with a real cookie
        "H_PS_PSSID": "xxxxxxxxxxxxxxxxxx",  # replace with a real cookie
    }

    try:
        response = requests.get(search_url, headers=headers, cookies=cookies, timeout=10)
        # Fail fast on HTTP errors before attempting to parse anything.
        response.raise_for_status()

        # NOTE: requests transparently decompresses gzip/deflate bodies, so
        # response.text is already plain HTML. Manually gunzipping
        # response.content (as this code previously did) would raise
        # gzip.BadGzipFile on the already-decoded bytes.
        soup = BeautifulSoup(response.text, "html.parser")

        listing_time, discontinued_time = None, None

        # Each Baidu search result lives in a <div class="c-container">.
        result_divs = soup.find_all("div", class_="c-container")
        for div in result_divs:
            # Only fill a slot that is still empty, so a later snippet that
            # mentions the keyword but carries no parseable date cannot
            # clobber a value we already extracted.
            title = div.find("h3")
            if title is not None:
                text = title.get_text()
                if listing_time is None and "上市" in text:
                    listing_time = extract_time_from_text(text, "上市")
                if discontinued_time is None and ("停售" in text or "停产" in text):
                    discontinued_time = extract_time_from_text(text, "停售|停产")

            # Fall back to the whole result body (which includes the title).
            content = div.get_text()
            if listing_time is None and "上市" in content:
                listing_time = extract_time_from_text(content, "上市")
            if discontinued_time is None and ("停售" in content or "停产" in content):
                discontinued_time = extract_time_from_text(content, "停售|停产")

            # Stop scanning once both dates have been found.
            if listing_time and discontinued_time:
                break

        return listing_time, discontinued_time

    except Exception as e:
        # Best-effort scraper: log the failure and fall through to the
        # (None, None) sentinel instead of raising.
        print(f"查询失败: {device_model}, 错误: {e}")
    return None, None
