#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
import csv
import os
from bs4 import BeautifulSoup

def extract_prompt_urls(html_file_path):
    """Extract PromptBase prompt URLs from a saved HTML page.

    Finds every ``<a>`` tag whose ``href`` contains ``/prompt/``, skips
    category listing pages (hrefs ending in ``/prompts``), and normalizes
    relative hrefs to absolute ``https://promptbase.com`` URLs.

    Args:
        html_file_path: Path to the saved HTML file (UTF-8).

    Returns:
        list[str]: Unique prompt URLs, deduplicated while preserving the
        order in which they first appear in the document (stable across
        runs, unlike ``list(set(...))``).
    """
    print(f"从 {html_file_path} 提取prompt URLs...")

    with open(html_file_path, 'r', encoding='utf-8') as f:
        html_content = f.read()

    soup = BeautifulSoup(html_content, 'html.parser')

    # dict keys give O(1) dedup while keeping first-seen order deterministic.
    seen = {}

    for a_tag in soup.find_all('a', href=re.compile(r'/prompt/')):
        url = a_tag.get('href')
        # Exclude category pages such as ".../prompts".
        if not url or url.endswith('/prompts'):
            continue
        # Make relative hrefs absolute.
        full_url = f"https://promptbase.com{url}" if url.startswith('/') else url
        seen.setdefault(full_url, None)

    prompt_links = list(seen)

    print(f"找到 {len(prompt_links)} 个unique prompt URLs")
    return prompt_links

def save_to_csv(prompt_links, output_file):
    """Write the collected prompt URLs to a one-column CSV file.

    Args:
        prompt_links: Iterable of prompt URL strings.
        output_file: Destination CSV path; overwritten if it exists.
    """
    # Single header row "prompt_url", then one URL per row.
    rows = [["prompt_url"], *([url] for url in prompt_links)]
    with open(output_file, 'w', newline='', encoding='utf-8') as handle:
        csv.writer(handle).writerows(rows)

    print(f"Prompt URLs已保存到 {output_file}")

def main(
    html_file_path="./rintelligence _ PromptBase Profile.html",
    output_dir="/Users/winegee/Desktop/socialNet/lab1/prompt_market_analytics/data",
):
    """Extract prompt URLs from a saved HTML page and save them as CSV.

    The paths were previously hard-coded in the body; they are now keyword
    parameters with the original values as defaults, so existing ``main()``
    callers are unaffected while other machines can supply their own paths.

    Args:
        html_file_path: Saved PromptBase profile HTML to parse.
        output_dir: Directory for the output CSV (created if missing).
    """
    # Ensure the output directory exists.
    os.makedirs(output_dir, exist_ok=True)

    # Output CSV file path.
    output_file = os.path.join(output_dir, "prompt_list.csv")

    # Extract prompt URLs, then persist them as CSV.
    prompt_links = extract_prompt_urls(html_file_path)
    save_to_csv(prompt_links, output_file)

    print("处理完成！")

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()