import json  
import urllib.parse  
import os  
import pandas as pd  
from lxml import etree  
from DrissionPage import ChromiumPage, ChromiumOptions  
from tqdm import tqdm  # 添加进度条  

def create_dp():
    """Build and return a configured DrissionPage ChromiumPage browser.

    The browser runs with a visible (non-headless) window in incognito mode,
    routes traffic through a local HTTP proxy on 127.0.0.1:7890, and uses a
    fixed desktop Chrome user-agent string.
    """
    options = ChromiumOptions()
    options.headless(False)
    options.incognito(True)
    # Flags that take no value can be applied in one pass.
    for flag in ('--no-sandbox', '--disable-gpu', '--start-maximized'):
        options.set_argument(flag)
    options.set_argument('--window-size', '1200,1000')
    options.set_proxy('http://127.0.0.1:7890')
    options.set_user_agent(
        user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36')
    return ChromiumPage(options)

def get_detail_urls_for_keyword(browser, keyword, max_results=3):
    """Search Amazon.sa for *keyword* and return product detail-page URLs.

    Args:
        browser: DrissionPage ChromiumPage used to load the search page.
        keyword: search term; URL-encoded before being placed in the query.
        max_results: maximum number of product links to return. Defaults to 3,
            matching the previously hard-coded limit (backward compatible).

    Returns:
        A tuple ``(detail_urls, search_url)`` where ``detail_urls`` is a list
        of detail-page URLs built from the ASINs found on the results page.
    """
    # URL-encode the keyword so non-ASCII search terms build a valid URL.
    encoded_keyword = urllib.parse.quote(keyword)
    search_url = f"https://www.amazon.sa/s?k={encoded_keyword}"

    print(f"正在搜索: {keyword}")
    print(f"搜索链接: {search_url}")

    browser.get(search_url)
    tree = etree.HTML(browser.html)

    # etree.HTML returns None for empty/unparseable markup; guard against it
    # so we return an empty result instead of raising AttributeError.
    if tree is None:
        asin_values = []
    else:
        # Each search-result card exposes its ASIN via a data-asin attribute.
        asin_values = tree.xpath('//div[@role="listitem"]/@data-asin')[:max_results]

    # Skip blank data-asin values (ad/placeholder slots carry an empty string).
    detail_urls = [f"https://www.amazon.sa/-/en/dp/{asin}" for asin in asin_values if asin]

    print(f"找到 {len(detail_urls)} 个产品链接")

    return detail_urls, search_url

def save_results(results, filename='amazon_detail_urls.json'):
    """Persist *results* to *filename* as pretty-printed UTF-8 JSON.

    Args:
        results: mapping of keyword -> {"search_url": ..., "product_urls": [...]}.
        filename: output path; defaults to 'amazon_detail_urls.json'.
    """
    with open(filename, 'w', encoding='utf-8') as f:
        # ensure_ascii=False keeps Chinese keywords human-readable in the file.
        json.dump(results, f, ensure_ascii=False, indent=4)
    # BUG FIX: message previously printed a literal "(unknown)" instead of
    # the actual output path.
    print(f"✓ 已保存结果到 {filename}")

def load_existing_results(filename='amazon_detail_urls.json'):
    """Load previously saved results from *filename* (resume support).

    Returns:
        The parsed dict, or ``{}`` when the file does not exist or contains
        invalid JSON (a warning is printed in the latter case).
    """
    if os.path.exists(filename):
        try:
            with open(filename, 'r', encoding='utf-8') as f:
                return json.load(f)
        except json.JSONDecodeError:
            # BUG FIX: warning previously printed a literal "(unknown)"
            # instead of the actual file path.
            print(f"警告: 无法解析 {filename}，将重新开始")
    return {}

def main():
    """Read keywords from keywords.xlsx, scrape the top product URLs for each,
    and incrementally save results to a JSON file (supports resuming)."""
    output_filename = 'amazon_detail_urls.json'

    # Load any previous run's results so already-handled keywords are skipped.
    results = load_existing_results(output_filename)
    if results:
        print(f"✓ 已从 {output_filename} 加载 {len(results)} 个已处理的关键词结果")

    # FIX: bind browser before the try so `finally` can test it directly,
    # instead of the fragile `'browser' in locals()` check.
    browser = None
    try:
        # Read the keyword spreadsheet; the column name is fixed.
        df = pd.read_excel('keywords.xlsx')
        keywords_column = '搜索词'

        if keywords_column not in df.columns:
            print(f"错误: 未找到列 '{keywords_column}'，可用列: {df.columns.tolist()}")
            return

        # Drop NaN cells and whitespace-only strings.
        keywords = [str(k).strip() for k in df[keywords_column] if pd.notna(k) and str(k).strip()]
        # Skip keywords already present in the saved results (resume support).
        keywords_to_process = [k for k in keywords if k not in results]

        print(f"✓ 共发现 {len(keywords)} 个关键词，还剩 {len(keywords_to_process)} 个待处理")

        browser = create_dp()
        print("✓ 浏览器已初始化")

        for keyword in tqdm(keywords_to_process, desc="处理关键词中", unit="个"):
            detail_urls, search_url = get_detail_urls_for_keyword(browser, keyword)

            results[keyword] = {
                "search_url": search_url,
                "product_urls": detail_urls
            }

            # Save after every keyword so an interruption loses at most one.
            save_results(results, output_filename)
            print(f"✓ 已完成关键词: {keyword}")

        print(f"✓ 全部处理完成！所有结果已保存到 {output_filename}")

    except Exception as e:
        print(f"错误: {e}")
        # `results` is always bound (set before the try), so the old
        # `'results' in locals()` check was redundant.
        if results:
            save_results(results, output_filename)
            print(f"✓ 部分结果已保存到 {output_filename}")
    finally:
        if browser is not None:
            browser.quit()
            print("✓ 浏览器已关闭")

# Script entry point: run the scraper only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()