import requests
from bs4 import BeautifulSoup
import pandas as pd
import time

# Shared HTTP headers: a desktop-browser User-Agent so the site serves
# the normal listing pages instead of blocking the scraper.
# Module-level request headers reused by every requests.get() call below.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
}
def get_phone_links(page_number):
    """Fetch one ZOL listing page and return absolute phone detail-page URLs.

    Args:
        page_number: 1-based index of the listing page to fetch.

    Returns:
        list[str]: absolute detail-page URLs found on the page; an empty
        list when the request fails or returns a non-200 status.
    """
    url = f'https://detail.zol.com.cn/cell_phone_index/subcate57_0_list_1_0_1_2_0_{page_number}.html'
    try:
        # timeout keeps a stalled connection from hanging the whole scrape;
        # without it requests.get() can block indefinitely.
        response = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        # Connection/DNS/timeout errors previously crashed the script;
        # treat them the same way as a bad status code.
        print(f"网页请求失败: {url}")
        return []

    if response.status_code != 200:
        print(f"网页请求失败: {url}")
        return []

    # Parse the listing page HTML.
    soup = BeautifulSoup(response.text, 'html.parser')

    # Each product title is an <h3> wrapping an <a href="/...">; only
    # site-relative hrefs are product pages, so skip anything else.
    phone_links = []
    for heading in soup.find_all('h3'):
        link_tag = heading.find('a', href=True)
        if link_tag:
            relative_url = link_tag['href']
            if relative_url.startswith('/'):
                phone_links.append(f"https://detail.zol.com.cn{relative_url}")

    return phone_links


# Extract detail info for a single phone page.
def extract_phone_details(phone_url):
    """Fetch a phone's detail page, locate its '参数' (specs) tab, and
    return a dict with the specs-page URL plus the scraped spec fields.

    Args:
        phone_url: absolute URL of a phone detail page.

    Returns:
        dict | None: collected details (may lack spec fields if the specs
        page could not be scraped); None when the page itself is
        unreachable or an unexpected error occurs.
    """
    phone_details = {}
    try:
        # timeout keeps a stalled connection from hanging the whole scrape.
        phone_response = requests.get(phone_url, headers=headers, timeout=10)
        if phone_response.status_code != 200:
            print(f"无法访问 {phone_url}")
            return None

        phone_soup = BeautifulSoup(phone_response.text, 'html.parser')

        # The specs tab has no stable id, so scan all <li> elements for an
        # <a> whose text is exactly '参数'.
        param_link_tag = None
        for li in phone_soup.find_all('li'):
            a_tag = li.find('a', href=True)
            if a_tag and a_tag.string == '参数':
                param_link_tag = a_tag
                break

        if param_link_tag:
            param_relative_url = param_link_tag['href']
            if param_relative_url.startswith('/'):
                param_url = f"https://detail.zol.com.cn{param_relative_url}"
                phone_details['设备参数详情页面地址'] = param_url
                param_details = extract_phone_parameters(param_url)
                # BUG FIX: extract_phone_parameters may return None on
                # failure; dict.update(None) raised TypeError, which the
                # except below swallowed, discarding the whole record.
                if param_details:
                    phone_details.update(param_details)

    except Exception as e:
        print(f"提取详情信息时出错: {e}")
        return None

    return phone_details


# Extract the spec fields from a phone's parameter page.
def extract_phone_parameters(param_url):
    """Scrape selected spec fields from a ZOL parameter page.

    Args:
        param_url: absolute URL of the '参数' (specs) page.

    Returns:
        dict: field name -> value for every field found. Returns an
        empty dict (never None) on failure, so callers can safely pass
        the result to dict.update().
    """
    try:
        # timeout keeps a stalled connection from hanging the whole scrape.
        param_response = requests.get(param_url, headers=headers, timeout=10)
        if param_response.status_code != 200:
            print(f"无法访问 {param_url}")
            # FIX: was `return None`, which made the caller's
            # phone_details.update(...) raise TypeError.
            return {}

        param_soup = BeautifulSoup(param_response.text, 'html.parser')

        phone_details = {}

        # Spec rows look like: <th>label</th> ... <td><span id="newPmVal_N">value</span></td>.
        # Find the <th> by its label text, then the span by id inside the
        # following <td>.
        def extract_info(th_text, span_id):
            th_tag = param_soup.find('th', string=th_text)
            if th_tag:
                td_tag = th_tag.find_next('td')
                if td_tag:
                    span_tag = td_tag.find('span', id=span_id)
                    if span_tag:
                        return span_tag.get_text(strip=True)
            return None

        # Output field -> (<th> label, candidate span ids ordered by priority).
        # Multiple ids are tried because the page layout varies between models.
        fields = {
            '产品型号': ('产品型号', ['newPmVal_1', 'newPmVal_2', 'newPmName_2']),
            '国内发布时间': ('国内发布时间', ['newPmVal_0']),
            '上市日期': ('上市日期', ['newPmVal_1', 'newPmVal_0']),
            '电商报价': ('电商报价', ['newPmVal_1']),
            '操作系统': ('操作系统', ['newPmVal_15', 'newPmVal_14']),
        }

        # Take the first candidate id that yields a non-empty value.
        for field_name, (th_text, span_ids) in fields.items():
            value = None
            for span_id in span_ids:
                value = extract_info(th_text, span_id)
                if value:
                    break
            if value:
                phone_details[field_name] = value

        return phone_details

    except Exception as e:
        print(f"提取参数信息时出错: {e}")
        # FIX: was `return None` — see above.
        return {}


# Accumulate one dict of details per phone across every listing page.
all_phone_details = []

# Pages 1 through 78 inclusive.
for page_number in range(1, 79):
    print(f"正在抓取第 {page_number} 页数据...")
    for link in get_phone_links(page_number):
        details = extract_phone_details(link)
        if details:
            all_phone_details.append(details)
        # Throttle to roughly one detail request per second to avoid
        # hammering the server.
        time.sleep(1)

# Persist the results: append to the existing workbook when present,
# otherwise create a fresh one.
excel_filename = 'phones_details_with_params.xlsx'
df = pd.DataFrame(all_phone_details)

try:
    previous_df = pd.read_excel(excel_filename, engine='openpyxl')
except FileNotFoundError:
    print("文件不存在，将创建新的文件。")
else:
    # Existing rows come first, new rows are appended after them.
    df = pd.concat([previous_df, df], ignore_index=True)

df.to_excel(excel_filename, index=False, engine='openpyxl')

print(f"数据已成功保存到 '{excel_filename}'")
