import openpyxl
import requests
from bs4 import BeautifulSoup
from time import sleep
import random
import re
from charset_normalizer import detect
import chardet
from io import BytesIO
import gzip

# File paths.
# Raw string for the Windows path: otherwise "\w" is an invalid escape
# sequence (SyntaxWarning on modern Python). The resulting value is identical.
file_path = r"D:\workNote\钉钉\设备型号副本.xlsx"   # input workbook (col1=model, col2=listing, col3=discontinued)
output_file_path = "更新后的设备信息.xlsx"           # output workbook written next to the script

# --- Query helpers ---
def query_device_info_baidu(device_model):
    """
    Look up a device's launch ("上市") and discontinuation ("停售") dates
    via a Baidu web search.

    Parameters
    ----------
    device_model : str
        Device model name to search for.

    Returns
    -------
    tuple[str | None, str | None]
        ``(listing_time, discontinued_time)``; an element is ``None`` when
        the corresponding date was not found in the search results.
    """
    search_url = f"https://www.baidu.com/s?wd={device_model}+上市时间+停售时间"
    # Rotate User-Agent strings to reduce the chance of being rate-limited.
    user_agents = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0",
        "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36"
    ]

    headers = {
        "User-Agent": random.choice(user_agents),  # pick a random UA per request
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Referer": "https://www.baidu.com/",
        "Cache-Control": "max-age=0",
        "Upgrade-Insecure-Requests": "1"
    }

    # Session cookies captured from a real browser session; Baidu serves a
    # captcha/empty page without them. These expire — refresh from the
    # browser when queries start failing.
    cookies = {
        "BAIDUID": "BF2F984E388D66E17A6965EFD18A45E1:SL=0:NR=10:FG=1",
        "BDUSS": "UpXbDNleEMyelcyRWZaa3M1eWZlSE42Z1o5fnVRbkl3Yy1iNWRENUlsbmNBbEJtSVFBQUFBJCQAAAAAAAAAAAEAAAA9vFF~18~Jq7Wlt-cAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANx1KGbcdShma",
        "H_PS_PSSID": "60279_61027_61216_60853_61492_61506_61520_61529_61612_61633_61639_61720",
    }

    try:
        response = requests.get(search_url, headers=headers, timeout=10, cookies=cookies)
        # Fail fast on HTTP errors before attempting to parse the body.
        response.raise_for_status()
        # NOTE: requests transparently decompresses gzip/deflate bodies, so
        # response.text is already plain HTML — no manual gzip handling.
        # (The previous manual-decompression code rebound `response` to a str
        # and then crashed on response.raise_for_status(), silently returning
        # (None, None) for every gzip-encoded reply.)
        response.encoding = 'utf-8'  # Baidu result pages are UTF-8
        soup = BeautifulSoup(response.text, "html.parser")

        listing_time, discontinued_time = None, None

        # Each search hit lives in a div.c-container.
        result_divs = soup.find_all("div", class_="c-container")
        for div in result_divs:
            # First try the result title, then the full snippet text.
            title = div.find("h3")
            if title:
                text = title.get_text()
                if "上市" in text:
                    listing_time = extract_time_from_text(text, "上市") or listing_time
                if "停售" in text or "停产" in text:
                    discontinued_time = extract_time_from_text(text, "停售|停产") or discontinued_time

            # `or` keeps a date already found in the title from being
            # clobbered by a snippet that mentions the keyword but no date.
            content = div.get_text()
            if "上市" in content:
                listing_time = extract_time_from_text(content, "上市") or listing_time
            if "停售" in content or "停产" in content:
                discontinued_time = extract_time_from_text(content, "停售|停产") or discontinued_time

            # Stop early once both dates are known.
            if listing_time and discontinued_time:
                break

        return listing_time, discontinued_time

    except Exception as e:
        # Best-effort scraper: log and fall through to (None, None) so one
        # failed model does not abort the whole batch.
        print(f"查询失败: {device_model}, 错误: {e}")
    return None, None


def extract_time_from_text(text, keywords):
    """
    Extract the first date that follows one of *keywords* in *text*.

    Parameters
    ----------
    text : str
        Text to scan (e.g. a Baidu result title or snippet).
    keywords : str
        Regex alternation of trigger keywords, e.g. ``"上市"`` or
        ``"停售|停产"``.

    Returns
    -------
    str | None
        The matched date string (most specific format wins), or ``None``
        when no keyword-followed-by-date pair is present.
    """
    # Ordered from most to least specific so "2020年5月1日" is not
    # truncated to "2020年5月" or "2020年".
    time_patterns = [
        r"(\d{4}年\d{1,2}月\d{1,2}日)",  # full date
        r"(\d{4}年\d{1,2}月)",           # year + month
        r"(\d{4}年)"                     # year only
    ]

    for pattern in time_patterns:
        # Wrap keywords in a non-capturing group. Without it, an alternation
        # such as "停售|停产" binds to the whole expression ("停售" OR
        # "停产…date"), so a bare match on the first keyword leaves
        # group(1) empty and the date is silently lost.
        match = re.search(rf"(?:{keywords}).*?{pattern}", text)
        if match:
            return match.group(1)
    return None

#  ---------------- main script ---------------------------
# Read the Excel workbook and fill in any missing launch/discontinuation
# dates by querying Baidu, then save to a new file.
wb = openpyxl.load_workbook(file_path)
sheet = wb.active


def _is_blank(cell):
    """Return True when the cell has no usable value."""
    return cell.value is None or cell.value == ""


# Columns: 1 = device model, 2 = listing time, 3 = discontinued time.
for row in sheet.iter_rows(min_row=2, max_row=sheet.max_row, min_col=1, max_col=3):
    model_cell, listing_cell, discontinued_cell = row
    device_model = model_cell.value

    # Skip rows without a model name (e.g. trailing empty rows) — the
    # previous version would have issued a search for "None".
    if not device_model:
        continue

    # Only query when at least one of the two dates is missing.
    if _is_blank(listing_cell) or _is_blank(discontinued_cell):
        print(f"查询设备: {device_model}")
        listing, discontinued = query_device_info_baidu(device_model)

        # Fill only the blank cells; never overwrite existing data.
        if _is_blank(listing_cell):
            listing_cell.value = listing if listing else "未知"
        if _is_blank(discontinued_cell):
            discontinued_cell.value = discontinued if discontinued else "未知"

        # Randomized delay to avoid hammering Baidu and getting blocked.
        sleep(random.uniform(2, 5))

# Persist results to a separate file so the source workbook stays intact.
wb.save(output_file_path)
print(f"更新完成，已保存到：{output_file_path}")


