#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import json
import time
import argparse
import requests
from bs4 import BeautifulSoup
import random

def get_headers():
    """Build a browser-like HTTP header dict with a randomly chosen User-Agent.

    Returns:
        dict: request headers suitable for passing to ``requests.get``.
    """
    browser_agents = (
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.2 Safari/605.1.15",
    )
    headers = {
        "User-Agent": random.choice(browser_agents),
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "none",
        "Sec-Fetch-User": "?1",
    }
    return headers

def get_seller_username(target_rank):
    """Locate the seller at position *target_rank* on the PromptBase leaderboard.

    Args:
        target_rank: 1-based leaderboard position to look up (must be >= 1).

    Returns:
        tuple: (username, profile_url) on success, or (None, None) on any
        failure (network error, unexpected page structure, rank out of range).
    """
    url = "https://promptbase.com/leaderboard"
    print(f"访问排行榜页面，查找排名第{target_rank}的卖家...")

    try:
        # Guard: a rank of 0 (or negative) would otherwise index from the
        # END of the list via rows[target_rank - 1] and silently return the
        # wrong seller.
        if target_rank < 1:
            raise Exception(f"无效的排名: {target_rank}，排名必须大于等于1")

        # timeout prevents the request from hanging indefinitely on a stalled
        # connection.
        response = requests.get(url, headers=get_headers(), timeout=30)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        # Locate all seller rows on the leaderboard; the exact markup is not
        # guaranteed, so try the most specific selectors first.
        rows = soup.select("tr[data-rank]") or soup.select(".leaderboard-item")

        if not rows:
            # Fall back to broader selectors if the specific ones matched nothing.
            rows = soup.select("tr") or soup.select(".item") or soup.select("[data-rank]")

        if len(rows) < target_rank:
            raise Exception(f"排行榜中只有{len(rows)}个卖家，无法找到排名第{target_rank}的卖家")

        # Convert the 1-based rank to a 0-based list index.
        target_row = rows[target_rank - 1]

        # Try several plausible selectors for the username link, most specific
        # first, ending with any anchor as a last resort.
        username_element = (
            target_row.select_one("a[href*='/profile/']") or
            target_row.select_one("a[href*='/user/']") or
            target_row.select_one("td:nth-child(2) a") or
            target_row.select_one(".username a") or
            target_row.select_one("a")
        )

        if not username_element:
            raise Exception("无法找到卖家用户名元素")

        username = username_element.text.strip()
        seller_url = username_element.get('href')

        # Relative links need the site origin prepended.
        if seller_url and not seller_url.startswith('http'):
            seller_url = f"https://promptbase.com{seller_url}"

        print(f"找到目标卖家: {username}，个人页面: {seller_url}")
        return username, seller_url

    except Exception as e:
        # Best-effort scraper: report the problem and signal failure to the
        # caller instead of crashing.
        print(f"无法获取卖家信息: {str(e)}")
        return None, None

def collect_seller_info(seller_url):
    """Scrape basic profile information from a seller's PromptBase page.

    Args:
        seller_url: absolute URL of the seller's profile page.

    Returns:
        dict with keys "username", "bio", "stats", "other_info",
        "profile_url", or None on any failure.
    """
    print(f"访问卖家个人页面: {seller_url}")

    try:
        # timeout prevents the request from hanging indefinitely.
        response = requests.get(seller_url, headers=get_headers(), timeout=30)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        # Username is mandatory: if the header is missing, the resulting
        # AttributeError is caught by the outer handler and None is returned.
        username = soup.select_one(".profile-header h1").text.strip()

        # Bio is optional; select_one returns None when absent, so .text
        # raises AttributeError, which we treat as "no bio".
        try:
            bio = soup.select_one(".profile-bio p").text.strip()
        except AttributeError:
            bio = ""
            print("未找到卖家bio")

        # Collect label/value statistics, skipping malformed entries.
        stats = {}
        stat_elements = soup.select(".profile-stats .stat-item")
        for stat in stat_elements:
            try:
                label = stat.select_one(".stat-label").text.strip().lower()
                value = stat.select_one(".stat-value").text.strip()
                stats[label] = value
            except AttributeError:
                # A sub-element was missing; skip this stat.
                continue

        # Collect "key: value" style info items (rank, join date, etc.).
        other_info = {}
        info_elements = soup.select(".profile-info .info-item")
        for info in info_elements:
            try:
                text = info.text.strip()
                if ":" in text:
                    key, value = text.split(":", 1)
                    other_info[key.strip().lower()] = value.strip()
            except AttributeError:
                continue

        seller_info = {
            "username": username,
            "bio": bio,
            "stats": stats,
            "other_info": other_info,
            "profile_url": seller_url
        }

        print(f"成功收集卖家 {username} 的信息")
        return seller_info

    except Exception as e:
        # Best-effort scraper: report and return None rather than crash.
        print(f"无法收集卖家信息: {str(e)}")
        return None

def save_seller_info(seller_info, output_dir):
    """Persist the collected seller data as pretty-printed UTF-8 JSON.

    Args:
        seller_info: JSON-serializable dict of seller data.
        output_dir: directory to write ``seller_info.json`` into
            (created if missing).
    """
    os.makedirs(output_dir, exist_ok=True)
    target_path = os.path.join(output_dir, "seller_info.json")

    serialized = json.dumps(seller_info, ensure_ascii=False, indent=2)
    with open(target_path, 'w', encoding='utf-8') as handle:
        handle.write(serialized)

    print(f"卖家信息已保存到 {target_path}")

def calculate_target_rank(student_id):
    """Derive the target leaderboard rank from a student ID.

    Args:
        student_id: numeric student ID (int or a string of digits,
            as received from the command line).

    Returns:
        int: ``student_id % 69``, i.e. a value in [0, 68].

    Note:
        The previous implementation passed the ID through ``eval()``,
        executing arbitrary code supplied via a CLI argument; ``int()``
        is the safe equivalent for numeric IDs.
    """
    return int(student_id) % 69

def main():
    """CLI entry point: resolve the target seller and dump their profile data."""
    parser = argparse.ArgumentParser(description="收集PromptBase卖家的基本信息")
    parser.add_argument("--student_id", required=True, help="学生ID，用于计算目标卖家排名")
    parser.add_argument("--output_dir", default="../data", help="输出目录路径")
    args = parser.parse_args()

    # Map the student ID to a leaderboard position.
    target_rank = calculate_target_rank(args.student_id)
    print(f"根据学生ID {args.student_id}，目标卖家排名为 {target_rank}")

    try:
        # Step 1: resolve the seller's name and profile URL from the leaderboard.
        username, seller_url = get_seller_username(target_rank)
        if not (username and seller_url):
            print("无法获取卖家用户名和URL，请检查排行榜页面是否可访问")
            return

        # Step 2: scrape the seller's profile page.
        seller_info = collect_seller_info(seller_url)
        if not seller_info:
            print("无法收集卖家信息，请检查网络连接或网站结构是否已变更")
            return

        # Step 3: persist the result.
        save_seller_info(seller_info, args.output_dir)
        print("卖家信息收集完成！")

    except Exception as e:
        print(f"程序执行出错: {str(e)}")

if __name__ == "__main__":
    main() 