import requests
from bs4 import BeautifulSoup
import time
import re

def get_product_links_from_page(url):
    """
    Extract all product links from a single listing page.

    Parameters:
        url: URL of a WooCommerce category/listing page.

    Returns:
        A de-duplicated list of product URLs; an empty list when the
        request fails.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }

    try:
        # An explicit timeout prevents the scraper from hanging forever on a
        # stalled connection — requests has NO default timeout.
        response = requests.get(url, headers=headers, timeout=15)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')

        product_links = []

        # Primary strategy: anchors inside the product title headings.
        for title in soup.find_all('h3', class_='wd-entities-title'):
            link_tag = title.find('a', href=True)
            if link_tag:
                product_links.append(link_tag['href'])

        # Fallback strategy: product-image anchors, used only when the
        # title markup yielded nothing.
        if not product_links:
            for img_link in soup.find_all('a', class_='product-image-link'):
                if img_link.get('href'):
                    product_links.append(img_link['href'])

        # De-duplicate; ordering is not significant to callers.
        return list(set(product_links))

    except requests.RequestException as e:
        print(f"请求页面失败: {e}")
        return []

def get_all_pages_links(base_url):
    """
    Collect the URLs of every pagination page for a listing.

    Parameters:
        base_url: URL of the first listing page.

    Returns:
        A list of page URLs sorted by page number (the base URL counts as
        page 1). Falls back to ``[base_url]`` when the request fails.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }

    def _page_number(url):
        # Extract the index from ".../page/<n>/". Keying on the match
        # itself (instead of a '/page/' substring test) avoids an
        # AttributeError for URLs that contain '/page/' but do not match
        # the numbered pattern. Pages without the segment sort as 1.
        match = re.search(r'/page/(\d+)/', url)
        return int(match.group(1)) if match else 1

    try:
        # An explicit timeout prevents a hang on a stalled connection.
        response = requests.get(base_url, headers=headers, timeout=15)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')

        page_links = [base_url]  # the listing itself is page 1

        pagination = soup.find('nav', class_='woocommerce-pagination')
        if pagination:
            for page_link in pagination.find_all('a', class_='page-numbers'):
                href = page_link.get('href')
                if href and href not in page_links:
                    page_links.append(href)

        # De-duplicate and order by page index.
        return sorted(set(page_links), key=_page_number)

    except requests.RequestException as e:
        print(f"获取分页失败: {e}")
        return [base_url]

def get_all_product_links(base_url):
    """
    Walk every pagination page of a listing and gather all product links.

    Parameters:
        base_url: URL of the first listing page.

    Returns:
        A de-duplicated list of product URLs collected across all pages.
    """
    print("正在获取分页链接...")
    pages = get_all_pages_links(base_url)
    print(f"找到 {len(pages)} 个分页")

    collected = []

    for index, page in enumerate(pages, 1):
        print(f"正在处理第 {index} 页: {page}")

        links = get_product_links_from_page(page)
        print(f"第 {index} 页找到 {len(links)} 个商品")

        collected.extend(links)

        # Throttle so we do not hammer the server with rapid requests.
        time.sleep(1)

    # Collapse duplicates that appear on multiple pages.
    return list(set(collected))

def save_links_to_file(links, filename='product_links.txt'):
    """
    Write one link per line to *filename* using UTF-8 encoding.

    Parameters:
        links: Iterable of URL strings to persist.
        filename: Destination path (defaults to 'product_links.txt').
    """
    with open(filename, 'w', encoding='utf-8') as f:
        f.writelines(link + '\n' for link in links)
    # Report the actual destination; the original printed a literal
    # "(unknown)" placeholder instead of interpolating the filename.
    print(f"链接已保存到 {filename}")

# Entry point: crawl the category listing, print, and persist the links.
if __name__ == "__main__":
    category_url = "https://www.theluxedeals.com/product-category/bags/"

    print("开始爬取商品链接...")
    results = get_all_product_links(category_url)

    print(f"\n总共找到 {len(results)} 个唯一商品链接:")
    for position, product_url in enumerate(results, 1):
        print(f"{position}. {product_url}")

    # Persist under the default output filename.
    save_links_to_file(results)

    print("\n爬取完成！")