import csv
import time
import random
import os 
import re
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException, TimeoutException, WebDriverException
from webdriver_manager.chrome import ChromeDriverManager

# --- Configuration ---
csv_file_path = '../data/prompt_list.csv'  # Input CSV file (prompt URLs, one per row, first column; header skipped)
sql_file_path = '../data/prompt_data.sql'  # Output SQL file (CREATE TABLE + INSERT statements, append-only)
min_delay = 5                      # Minimum delay between requests (seconds)
max_delay = 15                     # Maximum delay between requests (seconds)
batch_size = 5                     # Number of URLs to process in each batch (one browser instance per batch)

# --- Read URLs ---
# Load prompt URLs from the first column of the input CSV (header row skipped).
prompt_urls = []
if not os.path.exists(csv_file_path):
    print(f"Error: CSV file '{csv_file_path}' not found.")
    exit()

try:
    with open(csv_file_path, 'r', newline='', encoding='utf-8') as csvfile:
        reader = csv.reader(csvfile)
        # next(reader, None) tolerates a completely empty file; a bare next()
        # would raise StopIteration and surface as a confusing error below.
        header = next(reader, None)  # Skip header row
        for row in reader:
            if row:  # skip blank lines
                prompt_urls.append(row[0])
    print(f"Loaded {len(prompt_urls)} URLs from '{csv_file_path}'.")
except Exception as e:
    print(f"Error reading CSV file: {e}")
    exit()

# --- Initialize SQL file ---
# On first run, create the output file with a header comment and the table DDL.
if not os.path.exists(sql_file_path):
    # Make sure the output directory exists
    os.makedirs(os.path.dirname(sql_file_path), exist_ok=True)
    preamble = (
        "-- PromptBase Data SQL Insert Statements\n\n"
        "-- Create table\n"
        "CREATE TABLE IF NOT EXISTS prompts (\n"
        "    url TEXT PRIMARY KEY,\n"
        "    name TEXT,\n"
        "    rating REAL,\n"
        "    likes INTEGER,\n"
        "    word_count INTEGER,\n"
        "    tags TEXT,\n"
        "    description TEXT,\n"
        "    price REAL\n"
        ");\n\n"
        "-- Data insertion\n"
    )
    with open(sql_file_path, 'w', encoding='utf-8') as f:
        f.write(preamble)

# --- Check for already processed URLs ---
# A URL counts as processed if its SQL-escaped form (single quotes doubled)
# already appears anywhere in the existing output file.
processed_urls = set()
try:
    with open(sql_file_path, 'r', encoding='utf-8') as f:
        existing_sql = f.read()
    processed_urls = {u for u in prompt_urls if u.replace("'", "''") in existing_sql}
    print(f"Found {len(processed_urls)} processed URLs in the SQL file.")
except FileNotFoundError:
    print("No existing SQL file found. Starting fresh.")
except Exception as e:
    print(f"Error reading existing SQL file: {e}")


# Filter out unprocessed URLs
urls_to_process = [u for u in prompt_urls if u not in processed_urls]
print(f"URLs to process: {len(urls_to_process)}.")

# --- Scraping Functions ---
def extract_description(driver):
    """Return the prompt's description text, or 'N/A' if none can be found.

    Tries "div.content" first, then falls back to "div.description".
    Returns the raw text: SQL escaping is performed at the insertion site,
    so escaping here as well would double apostrophes twice in the output.
    """
    # Try each known description container in order of preference.
    for selector in ("div.content", "div.description"):
        try:
            element = driver.find_element(By.CSS_SELECTOR, selector)
            # Drop the trailing "...more" expander text the site appends.
            return element.text.strip().replace('...more', '').strip()
        except Exception:
            continue
    return 'N/A'

def extract_price(driver):
    """Return the prompt's price as a float, or 0.0 if it cannot be determined.

    Strategy 1: read the dedicated price element directly.
    Strategy 2: regex-scan the text of the surrounding price container.
    """
    price_text = None  # defined up front so the ValueError handler can always print it
    try:
        # Try to directly locate the element containing the price number
        price_element = driver.find_element(By.CSS_SELECTOR, "div.price-tag span.price")
        # The element sometimes includes the currency symbol; drop it before parsing.
        price_text = price_element.text.strip().lstrip('$')
        if price_text:
            return float(price_text)
    except NoSuchElementException:
        print("DEBUG: Price element (div.price-tag span.price) not found.")
    except ValueError:
        print(f"DEBUG: Could not convert price text '{price_text}' to float.")
    except Exception as e:
        print(f"DEBUG: Error in extract_price (attempt 1): {e}")

    try:
        # Alternative: Find parent container with "$" and use regex
        price_container = driver.find_element(By.CSS_SELECTOR, "div.price-tag")
        price_text_full = price_container.text.strip()

        # Accept both "$4.99" and whole-dollar prices like "$5"
        # (the old pattern required a decimal point and missed integer prices).
        price_match = re.search(r'\$(\d+(?:\.\d+)?)', price_text_full)
        if price_match:
            return float(price_match.group(1))
        # No "$" directly before the number: accept a bare number only if the
        # container text (or a child span) shows a dollar sign somewhere.
        price_match_num_only = re.search(r'(\d+(?:\.\d+)?)', price_text_full)
        if price_match_num_only:
            if "$" in price_text_full or price_container.find_elements(By.XPATH, ".//span[contains(text(), '$')]"):
                return float(price_match_num_only.group(1))
    except NoSuchElementException:
        print("DEBUG: Price container (div.price-tag) not found for regex.")
    except Exception as e:
        print(f"DEBUG: Error in extract_price (attempt 2 using regex): {e}")

    print("DEBUG: Could not extract price, returning 0.0")
    return 0.0

def extract_prompt_data(driver, url):
    """Load a prompt page and scrape its fields into a dict (None on any failure)."""
    try:
        driver.get(url)

        # Wait until the page's main heading is rendered before scraping anything.
        WebDriverWait(driver, 20).until(
            EC.presence_of_element_located((By.TAG_NAME, "h1"))
        )

        # Scroll down in two steps so lazily-loaded content appears.
        for target in ("document.body.scrollHeight/2", "document.body.scrollHeight"):
            driver.execute_script(f"window.scrollTo(0, {target});")
            time.sleep(random.uniform(0.5, 1.0))

        name = driver.find_element(By.TAG_NAME, "h1").text.strip()

        def _attempt(getter, fallback):
            # Run one extraction step; any failure yields the fallback value.
            try:
                return getter()
            except Exception:
                return fallback

        def _rating():
            rating_text = driver.find_element(By.CSS_SELECTOR, ".rating.has-rating").text.strip()
            match = re.search(r"[\d\.]+", rating_text)
            return float(match.group()) if match else 0.0

        def _likes():
            likes_text = driver.find_element(
                By.XPATH, "//div[contains(text(), 'Favorites')]/preceding-sibling::div"
            ).text.strip()
            return int(likes_text) if likes_text.isdigit() else 0

        def _word_count():
            elem = driver.find_element(
                By.XPATH, "//*[contains(text(), 'words') or contains(text(), 'word count')]"
            )
            match = re.search(r'(\d+)', elem.text)
            return int(match.group(1)) if match else 0

        def _tags():
            anchors = driver.find_elements(By.CSS_SELECTOR, "a[href^='/marketplace']")
            labels = [a.text.strip() for a in anchors if a.text.strip()]
            return ','.join(labels) if labels else 'N/A'

        data = {
            'url': url,
            'name': name,
            'rating': _attempt(_rating, 0.0),
            'likes': _attempt(_likes, 0),
            'word_count': _attempt(_word_count, 0),
            'tags': _attempt(_tags, 'N/A'),
            'description': extract_description(driver),
            'price': extract_price(driver),
        }

        print(f"Successfully extracted data for: {name}")

        return data

    except Exception as e:
        print(f"Error extracting data for {url}: {e}")
        return None

# --- Batch Scraping ---
# Launches one fresh browser per batch (limits memory growth / bot detection),
# writes an INSERT statement per successfully-scraped URL, and keeps running
# success/fail tallies.
success_count = 0
fail_count = 0

# Chrome-binary detection is batch-invariant, so do it once up front.
# Common Chrome locations on macOS (plus this machine's custom installs).
chrome_paths = [
    '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
    '/Applications/Chrome.app/Contents/MacOS/Chrome',
    '/Users/winegee/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
    '/Applications/GPT Chrome.app/Contents/MacOS/GptBrowser',
    os.path.expanduser('~/Applications/GPT Chrome.app/Contents/MacOS/GptBrowser')
]
chrome_binary = next((p for p in chrome_paths if os.path.exists(p)), None)
if chrome_binary is None:
    print("Chrome browser not found in common locations. Please install Chrome or specify its path manually.")
    exit(1)
print(f"Chrome browser found at: {chrome_binary}")

for batch_start in range(0, len(urls_to_process), batch_size):
    batch_urls = urls_to_process[batch_start:batch_start + batch_size]
    print(f"\nStarting batch {batch_start//batch_size + 1}, URLs: {batch_start + 1} - {batch_start + len(batch_urls)}.")

    # Fresh browser options per batch.
    options = webdriver.ChromeOptions()
    options.add_argument('user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36')
    options.add_argument('--disable-blink-features=AutomationControlled')
    options.add_argument('--disable-extensions')
    options.add_argument('--disable-gpu')
    # options.add_argument('--headless') # Optional: run in headless mode
    options.binary_location = chrome_binary

    driver = None   # initialized to None so cleanup below is safe if launch fails
    completed = 0   # URLs fully tallied (success or fail) in this batch

    try:
        print("Launching new browser instance...")
        # Let webdriver-manager resolve the driver matching the installed Chrome.
        # (The old version="stable" argument is not a valid version string, and
        # the `version` kwarg was removed in webdriver-manager 4.x.)
        service = Service(ChromeDriverManager().install())
        driver = webdriver.Chrome(service=service, options=options)
        # Hide the navigator.webdriver flag that sites use for bot detection.
        driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")

        for url_to_process in batch_urls:
            print(f"\nProcessing: {url_to_process}")

            data = extract_prompt_data(driver, url_to_process)

            if data:
                # Escape single quotes for SQL (doubled per the SQL standard)
                # and coerce numeric fields to safe defaults before formatting.
                db_url = data['url'].replace("'", "''")
                db_name = data['name'].replace("'", "''")
                db_rating = data['rating'] if isinstance(data['rating'], (int, float)) else 0.0
                db_likes = data['likes'] if isinstance(data['likes'], int) else 0
                db_word_count = data['word_count'] if isinstance(data['word_count'], int) else 0
                db_tags = data['tags'].replace("'", "''")
                db_description = data['description'].replace("'", "''")
                db_price = data['price'] if isinstance(data['price'], (int, float)) else 0.0

                sql = f'''INSERT OR REPLACE INTO prompts (url, name, rating, likes, word_count, tags, description, price) 
VALUES ('{db_url}', '{db_name}', {db_rating}, {db_likes}, {db_word_count}, '{db_tags}', '{db_description}', {db_price});\n'''

                # Append immediately so progress survives a mid-batch crash.
                with open(sql_file_path, 'a', encoding='utf-8') as f:
                    f.write(sql)

                print(f"  > Successfully extracted and stored data.")
                success_count += 1
            else:
                print(f"  > Warning: Could not extract data for: {url_to_process}")
                fail_count += 1
            completed += 1

            # Randomized delay between requests to look less bot-like.
            sleep_time = random.uniform(min_delay, max_delay)
            print(f"  > Processing complete, pausing for {sleep_time:.2f} seconds...")
            time.sleep(sleep_time)

        if driver: driver.quit()
        print(f"Batch {batch_start//batch_size + 1} processed, browser closed.")

        if batch_start + batch_size < len(urls_to_process):
            batch_wait = random.uniform(15, 30)
            print(f"Inter-batch delay for {batch_wait:.2f} seconds...")
            time.sleep(batch_wait)

    except WebDriverException as e:
        print(f"WebDriver error during batch processing: {e}")
        # Count only the URLs not yet tallied as failed; the old formula
        # double-counted URLs that had already been recorded as failures.
        fail_count += len(batch_urls) - completed
        if driver:
            try: driver.quit()
            except Exception: pass
    except Exception as e:
        print(f"Error during batch processing: {e}")
        fail_count += len(batch_urls) - completed
        if driver:
            try: driver.quit()
            except Exception: pass

print("\nScraping finished!")
print(f"Successfully processed: {success_count} URLs")
print(f"Failed to process: {fail_count} URLs")
print(f"SQL data saved to: '{sql_file_path}'")
