import re
import urllib.error
import urllib.request

def dangdang_bookbag_crawler():
    """Scrape the Dangdang search page for "书包" (bookbag) products.

    Fetches the category search page, extracts product name and price
    from each ``<li>`` item via regex, and prints a numbered table of
    the results to stdout. Returns None; all output is printed.

    Network or HTTP failures are reported as a printed message rather
    than raised, so the script always terminates cleanly.
    """
    # URL-encoded key=%CA%E9%B0%FC is "书包" in GBK encoding.
    url = "https://search.dangdang.com/?key=%CA%E9%B0%FC&category_id=10009684#J_tab"

    # A browser-like User-Agent avoids the trivial anti-bot filter that
    # rejects the default "Python-urllib/x.y" agent.
    request = urllib.request.Request(
        url,
        headers={
            "User-Agent": (
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                "AppleWebKit/537.36 (KHTML, like Gecko) "
                "Chrome/120.0 Safari/537.36"
            )
        },
    )

    try:
        # Close the response deterministically; the original leaked it.
        with urllib.request.urlopen(request, timeout=3) as response:
            # Dangdang serves GBK, a superset of gb2312: strict gb2312
            # decoding raises UnicodeDecodeError on GBK-only characters.
            # errors="replace" keeps best-effort scraping from crashing.
            html = response.read().decode('gbk', errors='replace')

        # One <li> per product card; re.S lets '.' span newlines.
        li_pattern = re.compile(r'<li[^>]*?>(.*?)</li>', re.S)
        lis = li_pattern.findall(html)

        # Product name lives in the <a title="..."> attribute;
        # price in <span class="price_n">.
        name_pattern = re.compile(r'<a\s*?title="\s*([^"]*?)"', re.S)
        price_pattern = re.compile(r'<span class="price_n">(.*?)</span>', re.S)

        valid_count = 0
        print("当当网“书包”商品爬取结果：")
        print(f"{'序号':<4}{'价格':<10}{'商品名'}")
        print("-" * 80)

        for idx, li in enumerate(lis, start=1):
            name_list = name_pattern.findall(li)
            price_list = price_pattern.findall(li)

            # Only count <li> items that carry both a name and a price
            # (the page mixes in navigation/ad list items).
            if name_list and price_list:
                product_name = name_list[0].strip()
                # Prices arrive as HTML entity "&yen;"; show "¥" instead.
                product_price = price_list[0].strip().replace('&yen;', '¥')
                print(f"{idx:<4}{product_price:<10}{product_name}")
                valid_count += 1

        print("-" * 80)
        print(f"共爬取到 {valid_count} 件有效商品")

    except urllib.error.HTTPError as e:
        print(f"请求错误：HTTP状态码 {e.code}（可能是反爬拦截，建议添加请求头）")
    except urllib.error.URLError as e:
        # Covers DNS failures, refused connections, and socket timeouts,
        # which previously crashed the script uncaught.
        print(f"网络错误：{e.reason}")

# Script entry point: run the crawler only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    print("开始爬取当当网“书包”商品数据...\n")
    dangdang_bookbag_crawler()
    print("\n爬取任务结束！")