from bs4 import BeautifulSoup
import requests
import os
from charset_normalizer import detect  # auto-detect character encoding

# Directory containing this script; anchors all paths so they work
# regardless of the current working directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# JSON configuration file for the spider (created empty if missing).
CONFIG_FILE = os.path.join(BASE_DIR, "conf", "spider_config.json")
# Log file for the spider (extension changed to .log).
LOG_FILE = os.path.join(BASE_DIR, "log", "spider_log.log")

def ensure_directories_and_files():
    """Ensure the directories and files required by the spider exist.

    Creates the ``conf`` and ``log`` directories next to this script
    (derived from CONFIG_FILE / LOG_FILE, which are anchored to
    BASE_DIR) and creates empty placeholder files for the config
    ("{}") and the log ("") if they are missing.

    Note: the directories are derived from the file constants rather
    than the bare names "conf"/"log" — the bare names are relative to
    the current working directory, so running the script from any
    other directory would create the wrong folders and then fail to
    open CONFIG_FILE/LOG_FILE under BASE_DIR.
    """
    # Ensure the conf directory exists (under BASE_DIR, not the CWD).
    conf_dir = os.path.dirname(CONFIG_FILE)
    if not os.path.exists(conf_dir):
        os.makedirs(conf_dir)
        print("Created directory: conf")

    # Ensure the log directory exists (under BASE_DIR, not the CWD).
    log_dir = os.path.dirname(LOG_FILE)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
        print("Created directory: log")

    # Ensure the config file exists; seed it with an empty JSON object.
    if not os.path.exists(CONFIG_FILE):
        with open(CONFIG_FILE, "w") as config_file:
            config_file.write("{}")
        print(f"Created file: {CONFIG_FILE}")

    # Ensure the log file exists (created empty).
    if not os.path.exists(LOG_FILE):
        with open(LOG_FILE, "w") as log_file:
            log_file.write("")
        print(f"Created file: {LOG_FILE}")

# Make sure the conf/log directories and files exist before doing anything.
ensure_directories_and_files()

# Target URL for the scraper
url = 'https://www.4399.com/'

# Send the HTTP request; a timeout prevents the script from hanging
# forever if the server never responds.
response = requests.get(url, timeout=10)

# Only decode and parse the body when the request succeeded.
if response.status_code == 200:
    # Auto-detect the page encoding from the raw bytes (the site may not
    # serve UTF-8); fall back to requests' own guess if detection fails.
    detected_encoding = detect(response.content)['encoding']
    response.encoding = detected_encoding or response.apparent_encoding

    # Parse the page with BeautifulSoup.
    soup = BeautifulSoup(response.text, 'lxml')

    # Find the <title> tag.
    title_tag = soup.find('title')
    # Find all <img> tags.
    img_tags = soup.find_all('img')

    # Print the page title text.
    if title_tag:
        print(title_tag.get_text())
    else:
        print("未找到<title>标签")

    # Walk every <img> tag and report its alt attribute.
    for idx, img_tag in enumerate(img_tags, start=1):
        alt_text = img_tag.get('alt', '未找到 alt 属性')  # default when alt is absent
        print(f"第 {idx} 个图片的 alt 属性: {alt_text}")
else:
    print("请求失败，状态码：", response.status_code)