#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import json
import csv
import time
import random
import argparse
from concurrent.futures import ThreadPoolExecutor
import requests
from bs4 import BeautifulSoup
import pandas as pd
from sqlalchemy import create_engine, Column, Integer, String, Float, Text, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from tqdm import tqdm

# SQLAlchemy declarative base shared by the ORM model(s) below.
Base = declarative_base()

class Prompt(Base):
    """ORM model: one row per scraped PromptBase prompt listing.

    Populated by collect_prompt_details() and written via DataFrame.to_sql
    in save_prompt_details(); column names must match the dict keys built
    by the collector.
    """

    __tablename__ = 'prompts'
    
    id = Column(Integer, primary_key=True)
    # Prompt page URL; UNIQUE, so appending an already-saved URL will fail.
    url = Column(String(255), unique=True, nullable=False)
    name = Column(String(255))
    price = Column(Float)            # numeric price, parsed from "$x.xx" text
    rating = Column(Float)           # star rating parsed from .rating-stars title attr
    rating_count = Column(Integer)   # number of ratings, parsed from "(N)" text
    likes = Column(Integer)
    word_count = Column(Integer)     # first token of the .word-count text, when present
    description = Column(Text)
    tags = Column(Text)              # comma-joined tag texts
    model = Column(String(100))
    category = Column(String(100))
    # Dates are stored as the raw display strings from the page, not parsed datetimes.
    last_updated = Column(String(100))
    created_date = Column(String(100))
    seller_username = Column(String(100))
    collected_at = Column(DateTime)  # timestamp of when the scrape ran

def get_headers():
    """Assemble browser-like request headers with a randomly picked User-Agent.

    Rotating the User-Agent between requests makes the scraper look less
    like a single automated client.
    """
    ua_pool = (
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.2 Safari/605.1.15",
    )
    # User-Agent is inserted first so the header ordering matches a real browser.
    headers = {"User-Agent": random.choice(ua_pool)}
    headers.update({
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "none",
        "Sec-Fetch-User": "?1",
    })
    return headers

def load_prompt_urls(data_dir):
    """Read the previously extracted prompt URLs from prompt_list.csv.

    Args:
        data_dir: directory containing prompt_list.csv.

    Returns:
        list[str]: values of the ``prompt_url`` column.

    Raises:
        FileNotFoundError: when prompt_list.csv has not been generated yet.
    """
    csv_file = os.path.join(data_dir, "prompt_list.csv")

    # Guard clause: fail early with a pointer to the script that produces the file.
    if not os.path.exists(csv_file):
        raise FileNotFoundError(f"Prompt URL文件 {csv_file} 不存在，请先运行extract_prompt_urls.py")

    frame = pd.read_csv(csv_file)
    return list(frame["prompt_url"])

def _select_text(soup, selector, default=""):
    """Return the stripped text of the first element matching `selector`, or `default`."""
    elem = soup.select_one(selector)
    return elem.text.strip() if elem is not None else default

def collect_prompt_details(url):
    """Collect the details of a single prompt page.

    Args:
        url: absolute URL of the prompt page.

    Returns:
        dict: scraped fields keyed by the Prompt model column names, always
        including "url" and "collected_at". On any request/parse failure the
        dict is ``{"url": url, "error": <message>}`` so callers can filter
        failed rows out (see main()).
    """
    print(f"收集prompt详情: {url}")
    
    try:
        # Timeout prevents a stalled connection from hanging a worker thread forever.
        response = requests.get(url, headers=get_headers(), timeout=30)
        response.raise_for_status()
        
        soup = BeautifulSoup(response.text, 'html.parser')
        
        prompt_data = {"url": url}
        
        # Name: page heading; empty string when no <h1> is present.
        prompt_data["name"] = _select_text(soup, "h1")
        
        # Price: "$3.99" -> 3.99; None when absent or unparseable.
        try:
            prompt_data["price"] = float(_select_text(soup, ".price").replace("$", "").strip())
        except ValueError:
            prompt_data["price"] = None
        
        # Rating: the star widget carries the value in its title attribute;
        # the count is shown next to it as "(N)". Defaults match the original
        # behavior (None / 0) but a bad count no longer discards a good rating.
        prompt_data["rating"] = None
        prompt_data["rating_count"] = 0
        rating_elem = soup.select_one(".rating-stars")
        if rating_elem is not None:
            try:
                prompt_data["rating"] = float(rating_elem.get("title", "0").split()[0])
            except (ValueError, IndexError):
                pass
            try:
                count_text = _select_text(soup, ".rating-count")
                prompt_data["rating_count"] = int(count_text.replace("(", "").replace(")", ""))
            except ValueError:
                pass
        
        # Likes: integer, 0 when missing or non-numeric.
        try:
            prompt_data["likes"] = int(_select_text(soup, ".likes-count"))
        except ValueError:
            prompt_data["likes"] = 0
        
        # Word count: first token of e.g. "123 words"; None when absent/unparseable.
        word_text = _select_text(soup, ".word-count")
        try:
            prompt_data["word_count"] = int(word_text.split()[0]) if word_text else None
        except (ValueError, IndexError):
            prompt_data["word_count"] = None
        
        prompt_data["description"] = _select_text(soup, ".description")
        
        # Tags: comma-joined texts of every .tag element.
        prompt_data["tags"] = ",".join(tag.text.strip() for tag in soup.select(".tag"))
        
        # Remaining plain-text fields share one extraction pattern.
        for key, selector in (
            ("model", ".model"),
            ("category", ".category"),
            ("last_updated", ".last-updated"),
            ("created_date", ".created-date"),
            ("seller_username", ".seller-username"),
        ):
            prompt_data[key] = _select_text(soup, selector)
        
        prompt_data["collected_at"] = pd.Timestamp.now()
        
        return prompt_data
    
    except Exception as e:
        # Broad by design: any failure for this URL is reported and marked so
        # the caller can drop the row instead of aborting the whole batch.
        print(f"收集prompt详情时发生错误 ({url}): {str(e)}")
        return {"url": url, "error": str(e)}

def save_prompt_details(prompt_details_list, output_dir):
    """Persist collected prompt detail dicts into <output_dir>/prompts.db (SQLite).

    Args:
        prompt_details_list: list of dicts whose keys match the Prompt columns.
        output_dir: destination directory; created if missing.

    Raises:
        Re-raises any database/DataFrame error after logging it.
    """
    os.makedirs(output_dir, exist_ok=True)
    db_file = os.path.join(output_dir, "prompts.db")
    
    engine = create_engine(f"sqlite:///{db_file}")
    
    try:
        # Ensure the `prompts` table exists before appending rows.
        Base.metadata.create_all(engine)
        
        # to_sql writes through the engine directly; the ORM Session the
        # original code created here was never used and has been removed.
        df = pd.DataFrame(prompt_details_list)
        
        # NOTE(review): `url` is UNIQUE — appending a previously saved URL
        # raises IntegrityError; dedupe upstream when re-running a collection.
        df.to_sql('prompts', engine, if_exists='append', index=False)
        
        print(f"成功保存 {len(prompt_details_list)} 条prompt详情到数据库")
    
    except Exception as e:
        print(f"保存数据时发生错误: {str(e)}")
        raise
    
    finally:
        # Release pooled connections so the SQLite file is not left locked.
        engine.dispose()

def main():
    """CLI entry point: load prompt URLs, scrape details concurrently, persist to SQLite."""
    parser = argparse.ArgumentParser(description="收集PromptBase上prompt的详细信息")
    parser.add_argument("--data_dir", default="../data", help="数据目录路径")
    parser.add_argument("--max_workers", type=int, default=5, help="最大线程数")
    args = parser.parse_args()
    
    try:
        urls = load_prompt_urls(args.data_dir)
        print(f"加载了 {len(urls)} 个prompt URL")
        
        def fetch_with_delay(url):
            # Random pause before each request to reduce the chance of an IP ban.
            time.sleep(random.uniform(1, 3))
            return collect_prompt_details(url)
        
        # Fan the URLs out over a thread pool, tracking progress with tqdm.
        with ThreadPoolExecutor(max_workers=args.max_workers) as pool:
            progress = tqdm(
                pool.map(fetch_with_delay, urls),
                total=len(urls),
                desc="收集prompt详情"
            )
            results = list(progress)
        
        # Keep only rows that scraped cleanly (failures carry an "error" key).
        prompt_details_list = [row for row in results if "error" not in row]
        
        if not prompt_details_list:
            print("没有成功收集到任何prompt详情")
        else:
            save_prompt_details(prompt_details_list, args.data_dir)
            print("prompt详情收集完成！")
    
    except Exception as e:
        print(f"程序执行出错: {str(e)}")

if __name__ == "__main__":
    main()