#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import json
import csv
import re
import time
import random
import argparse
import requests
from bs4 import BeautifulSoup

def get_headers():
    """Build browser-like request headers with a randomly chosen User-Agent.

    Returns:
        dict: HTTP headers suitable for ``requests.get`` that mimic a
        desktop browser, with the ``User-Agent`` picked at random from a
        small pool of current Chrome/Safari strings.
    """
    ua_pool = (
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.2 Safari/605.1.15",
    )
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "none",
        "Sec-Fetch-User": "?1",
    }
    headers["User-Agent"] = random.choice(ua_pool)
    return headers

def load_seller_info(data_dir):
    """Load the previously collected seller info from ``seller_info.json``.

    Args:
        data_dir: Directory that should contain ``seller_info.json``.

    Returns:
        The deserialized JSON object (typically a dict).

    Raises:
        FileNotFoundError: If ``seller_info.json`` does not exist in
            *data_dir* (the collector script must be run first).
    """
    info_path = os.path.join(data_dir, "seller_info.json")
    if not os.path.exists(info_path):
        raise FileNotFoundError(f"卖家信息文件 {info_path} 不存在，请先运行collect_seller_info.py")
    with open(info_path, 'r', encoding='utf-8') as fh:
        return json.load(fh)

def save_page_html(html_content, output_dir):
    """Write the fetched page HTML to ``seller_page.html`` in *output_dir*.

    Args:
        html_content: Raw HTML text to persist.
        output_dir: Destination directory (must already exist).

    Returns:
        str: Path of the file that was written.
    """
    target = os.path.join(output_dir, "seller_page.html")
    with open(target, 'w', encoding='utf-8') as fh:
        fh.write(html_content)
    print(f"页面HTML已保存到 {target}")
    return target

def extract_prompt_urls(html_content):
    """Extract unique prompt URLs from a PromptBase seller page's HTML.

    Scans every ``<a>`` whose ``href`` contains ``/prompt/``, skips
    category pages (hrefs ending in ``/prompts``), and absolutizes
    root-relative links against ``https://promptbase.com``.

    Args:
        html_content: Raw HTML of the seller page.

    Returns:
        list[str]: Unique prompt URLs in document order.
    """
    print("从HTML内容提取prompt URLs...")

    soup = BeautifulSoup(html_content, 'html.parser')

    # One global scan is sufficient: the former second pass over
    # prompt/card/grid/collection sections only re-found anchors that
    # this find_all already returns, so it was pure duplicate work.
    prompt_links = []
    for a_tag in soup.find_all('a', href=re.compile(r'/prompt/')):
        url = a_tag.get('href')
        if not url or url.endswith('/prompts'):  # skip category pages
            continue
        prompt_links.append(f"https://promptbase.com{url}" if url.startswith('/') else url)

    # De-duplicate while preserving document order; list(set(...)) made
    # the output order nondeterministic between runs.
    prompt_links = list(dict.fromkeys(prompt_links))

    print(f"找到 {len(prompt_links)} 个unique prompt URLs")
    return prompt_links

def save_prompt_urls(prompt_links, output_dir):
    """Persist prompt URLs to ``prompt_list.csv`` (one column: prompt_url).

    Args:
        prompt_links: Iterable of prompt URL strings.
        output_dir: Destination directory (must already exist).
    """
    out_path = os.path.join(output_dir, "prompt_list.csv")
    rows = [["prompt_url"]] + [[link] for link in prompt_links]
    with open(out_path, 'w', newline='', encoding='utf-8') as fh:
        csv.writer(fh).writerows(rows)
    print(f"Prompt URLs已保存到 {out_path}")

def main():
    """CLI entry point: fetch the seller page and extract its prompt URLs.

    Reads ``seller_info.json`` from ``--data_dir``, downloads the seller's
    profile page, saves the raw HTML, and writes the extracted prompt URLs
    to ``prompt_list.csv``. Errors are reported to stdout rather than
    raised, so a failed run exits cleanly.
    """
    parser = argparse.ArgumentParser(description="从卖家页面提取prompt URLs")
    parser.add_argument("--data_dir", default="../data", help="数据目录路径")
    args = parser.parse_args()

    try:
        # Load previously collected seller info.
        seller_info = load_seller_info(args.data_dir)
        seller_url = seller_info.get("profile_url")

        if not seller_url:
            raise ValueError("卖家信息中缺少profile_url字段")

        # Fetch the seller page. A timeout is essential: requests.get
        # without one can block forever on a stalled connection.
        print(f"访问卖家页面: {seller_url}")
        response = requests.get(seller_url, headers=get_headers(), timeout=30)
        response.raise_for_status()

        # Save the raw HTML for later inspection/debugging.
        html_file = save_page_html(response.text, args.data_dir)

        # Extract and persist the prompt URLs.
        prompt_links = extract_prompt_urls(response.text)
        save_prompt_urls(prompt_links, args.data_dir)

        print("Prompt URLs提取完成！")

    except Exception as e:
        # Script-level boundary: report and exit quietly instead of tracebacking.
        print(f"提取prompt URLs时发生错误: {str(e)}")

# Run the extraction only when executed as a script (not on import).
if __name__ == "__main__":
    main() 