import os
import time
import random
from datetime import datetime, timedelta
import logging
import requests
import shutil
import zipfile
import platform
import stat
from typing import Optional, List, Dict, Any

from dotenv import load_dotenv
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import TimeoutException, WebDriverException
from supabase import create_client, Client

import config  # Import the new config file

# --- Configuration ---

# Load environment variables. This is optional here because config already
# does it on import, but it keeps direct script runs working.
load_dotenv()

# Fail fast if required settings (Supabase keys, URLs, ...) are missing.
config.validate_config()

# --- Aliases for convenience ---
WRITABLE_DIR = '/tmp/bin'
PROJECT_BIN_DIR = './bin'

# --- Logging Setup ---
# Create the directory for the log file if it does not exist yet.
log_dir = os.path.dirname(config.LOGGING_CONFIG['log_file'])
if log_dir:
    os.makedirs(log_dir, exist_ok=True)

# Log both to the configured file and to the console.
_log_handlers = [
    logging.FileHandler(config.LOGGING_CONFIG['log_file']),
    logging.StreamHandler(),
]
logging.basicConfig(
    level=config.LOGGING_CONFIG['level'],
    format=config.LOGGING_CONFIG['format'],
    handlers=_log_handlers,
)
logger = logging.getLogger(__name__)


# --- Chromedriver Management (adapted from serverless.py) ---

def get_chromedriver_path() -> str:
    """Return the chromedriver path, preferring the project-local binary.

    The project copy (``./bin/chromedriver``) is used only when it exists
    and is executable; otherwise the ``/tmp/bin`` location is returned,
    whether or not a file is actually there yet.
    """
    candidate = os.path.join(PROJECT_BIN_DIR, 'chromedriver')
    if os.path.exists(candidate) and os.access(candidate, os.X_OK):
        return candidate
    return os.path.join(WRITABLE_DIR, 'chromedriver')

def get_download_url(platform_name: str, file_type: str) -> Optional[str]:
    """Retrieves the download URL for a given platform and file type.

    Queries the Chrome-for-Testing "last known good versions" JSON feed and
    returns the Stable-channel URL for ``file_type`` (e.g. ``'chromedriver'``)
    on ``platform_name`` (e.g. ``'linux64'``).

    Returns:
        The download URL, or ``None`` on any failure (network error,
        unexpected JSON shape, or no matching platform/file-type entry).
    """
    try:
        # This JSON endpoint provides the latest stable versions and their download URLs.
        # Fetch the latest stable version to ensure compatibility
        url = "https://googlechromelabs.github.io/chrome-for-testing/last-known-good-versions-with-downloads.json"
        # requests has no default timeout; without one a stalled endpoint
        # would hang the entire scraper run.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        data = response.json()

        # Get downloads for the 'Stable' channel
        stable_channel = data.get('channels', {}).get('Stable', {})
        if not stable_channel:
            logger.error("Could not find 'Stable' channel in the JSON response.")
            return None

        downloads = stable_channel.get('downloads', {}).get(file_type)
        if not downloads:
            logger.error(f"Could not find downloads for file type '{file_type}' in the Stable channel.")
            return None

        # Find the download URL for the correct platform
        for download in downloads:
            if download.get('platform') == platform_name:
                return download.get('url')

        logger.error(f"Could not find download URL for platform '{platform_name}' and file type '{file_type}'.")
        return None
    except Exception as e:
        logger.error(f"Failed to retrieve download URL: {e}")
        return None

def _download_file(url: str, dest_path: str):
    """Downloads a file from a URL to a destination path.

    Streams the body in chunks so large archives do not need to fit in
    memory. Re-raises any failure after logging it.
    """
    logger.info(f"Downloading from {url} to {dest_path}...")
    try:
        # Context manager guarantees the connection is released even on
        # error, and the timeout prevents an unresponsive server from
        # hanging the run forever (requests has no default timeout).
        with requests.get(url, stream=True, timeout=60) as r:
            r.raise_for_status()
            with open(dest_path, 'wb') as f:
                # iter_content (unlike r.raw) transparently decodes any
                # Content-Encoding the server applies to the response.
                for chunk in r.iter_content(chunk_size=64 * 1024):
                    f.write(chunk)
        logger.info("Download complete.")
    except Exception as e:
        logger.error(f"Failed to download {url}: {e}")
        raise

def _extract_file_from_zip(zip_path: str, file_name: str, dest_path: str):
    """Extracts a single file from a zip archive to a destination path."""
    logger.info(f"Extracting '{file_name}' from {zip_path}...")
    try:
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            # The file inside the zip is often in a directory, e.g., 'chromedriver-mac-x64/chromedriver'.
            # We need to find the correct file path within the zip.
            candidate_files = [f for f in zip_ref.namelist() if f.endswith(file_name) and not f.startswith('__MACOSX')]
            if not candidate_files:
                raise FileNotFoundError(f"Could not find a file ending with '{file_name}' in {zip_path}")
            
            # In case there are multiple, pick the most likely one (shortest path is a good heuristic)
            file_to_extract = min(candidate_files, key=len)
            logger.info(f"Found '{file_to_extract}' in zip archive.")

            with zip_ref.open(file_to_extract) as source, open(dest_path, 'wb') as target:
                shutil.copyfileobj(source, target)
            
            os.chmod(dest_path, 0o755) # Set executable permission
            logger.info(f"Successfully extracted '{file_name}' to {dest_path}")
    except Exception as e:
        logger.error(f"Failed to extract '{file_name}' from {zip_path}: {e}")
        raise

def setup_chromium(platform_name: str) -> Optional[str]:
    """Downloads and sets up Chromedriver for the specified platform.

    Any project-local driver is deleted first to force a fresh download.
    Installation is attempted in the project bin directory, falling back to
    /tmp/bin. Returns the path of a usable chromedriver, or None on failure.
    """
    # Make sure both candidate install locations exist.
    os.makedirs(WRITABLE_DIR, exist_ok=True)
    try:
        os.makedirs(PROJECT_BIN_DIR, exist_ok=True)
    except OSError as e:
        logger.warning(f"Could not create project bin directory: {e}. Using /tmp only.")

    # Delete any stale project-local driver so a fresh copy is fetched.
    stale_driver = os.path.join(PROJECT_BIN_DIR, 'chromedriver')
    if os.path.exists(stale_driver):
        try:
            os.remove(stale_driver)
            logger.info(f"Removed existing chromedriver at {stale_driver} to force re-download.")
        except OSError as e:
            logger.warning(f"Could not remove existing chromedriver: {e}")

    driver_path = get_chromedriver_path()

    # Reuse a driver that is already present and runnable (after the forced
    # removal above this can only be the /tmp copy, or a project copy whose
    # deletion failed).
    if os.path.exists(driver_path) and os.access(driver_path, os.X_OK):
        logger.info(f"Using existing chromedriver: {driver_path}")
        # A bit of a simplification: assume if chromedriver exists, chrome is also handled.
        # For a truly robust solution, one might check for Chrome as well.
        return driver_path

    logger.info(f"Setting up binaries for {platform_name}...")
    try:
        logger.info("Downloading and extracting Chromedriver...")
        url = get_download_url(platform_name, 'chromedriver')
        if url is None:
            raise Exception(f"Could not get Chromedriver download URL for {platform_name}.")

        archive_path = os.path.join(WRITABLE_DIR, f"chromedriver-{platform_name}.zip")
        _download_file(url, archive_path)

        # First choice: install into the project directory.
        target = os.path.join(PROJECT_BIN_DIR, 'chromedriver')
        try:
            _extract_file_from_zip(archive_path, 'chromedriver', target)
            os.remove(archive_path)  # Clean up zip
            return target
        except Exception as e:
            logger.warning(f"Could not extract chromedriver to project directory: {e}. Falling back to /tmp.")
            target = os.path.join(WRITABLE_DIR, 'chromedriver')
            _extract_file_from_zip(archive_path, 'chromedriver', target)
            os.remove(archive_path)  # Clean up zip
            return target

    except Exception as e:
        logger.error(f"Failed to setup Chromedriver: {e}")
        return None

# --- Main Application Class ---

class BinanceMonitor:
    """Monitors a Binance Square profile, scrapes posts, and uploads to Supabase.

    Lifecycle: ``run()`` starts a (headless) Chrome driver — downloading a
    matching chromedriver if needed — scrapes the configured profile page,
    upserts the results into Supabase keyed on ``post_url``, and always
    quits the driver on the way out.
    """

    def __init__(self):
        # Profile page to scrape (a Binance Square author URL from config).
        self.target_url = config.SCRAPING_CONFIG['target_url']
        self.supabase_client: Client = self._init_supabase()
        self.driver: Optional[webdriver.Chrome] = None
        self.chromedriver_path: Optional[str] = None

    def _init_supabase(self) -> "Client":
        """Initializes and returns the Supabase client.

        Raises:
            Exception: re-raised from the supabase SDK if the client cannot
                be created (e.g. bad URL/key in config).
        """
        try:
            client = create_client(config.SUPABASE_URL, config.SUPABASE_KEY)
            logger.info("Supabase client initialized successfully.")
            return client
        except Exception as e:
            logger.error(f"Failed to initialize Supabase client: {e}")
            raise

    def _setup_driver(self):
        """Sets up the Selenium WebDriver, managing Chromedriver download.

        Detects the host OS/architecture, fetches a matching chromedriver via
        ``setup_chromium()``, then launches Chrome with the options from
        config. If launch fails with "Permission denied", the execute bit is
        set on the driver binary and launch is retried once.

        Raises:
            Exception: unsupported platform, or chromedriver setup failed.
            WebDriverException: Chrome could not be started.
        """
        logger.info("Initializing Selenium WebDriver...")

        system = platform.system().lower()
        machine = platform.machine().lower()

        # Map (OS, arch) onto the platform labels used by the
        # Chrome-for-Testing download feed.
        if system == 'linux':
            platform_name = 'linux64'
        elif system == 'darwin':  # macOS
            platform_name = 'mac-arm64' if machine == 'arm64' else 'mac-x64'
        elif system == 'windows':
            platform_name = 'win64'
        else:
            raise Exception(f"Unsupported operating system: {system}")

        logger.info(f"Setting up binaries for {platform_name}...")
        self.chromedriver_path = setup_chromium(platform_name)
        if not self.chromedriver_path or not os.path.exists(self.chromedriver_path):
            raise Exception("Chromedriver setup failed. Check logs for details.")

        logger.info(f"Using chromedriver from: {self.chromedriver_path}")

        chrome_options = Options()
        if config.SELENIUM_CONFIG['headless']:
            chrome_options.add_argument("--headless")

        chrome_options.add_argument(f"window-size={config.SELENIUM_CONFIG['window_size']}")
        chrome_options.add_argument(f"user-agent={config.SCRAPING_CONFIG['user_agent']}")

        for option in config.SELENIUM_CONFIG['chrome_options']:
            chrome_options.add_argument(option)

        try:
            service = Service(executable_path=self.chromedriver_path)
            self.driver = webdriver.Chrome(service=service, options=chrome_options)
            logger.info("Selenium WebDriver initialized successfully.")
        except WebDriverException as e:
            logger.error(f"WebDriver failed to initialize: {e}")
            # A freshly extracted chromedriver can lack the execute bit;
            # fix the permission and retry initialization once.
            if "Permission denied" in str(e):
                logger.info("Attempting to set executable permission on chromedriver...")
                os.chmod(self.chromedriver_path, 0o755)
                service = Service(executable_path=self.chromedriver_path)
                self.driver = webdriver.Chrome(service=service, options=chrome_options)
                logger.info("WebDriver initialized successfully after permission fix.")
            else:
                raise
        except Exception as e:
            logger.error(f"An unexpected error occurred during WebDriver setup: {e}")
            raise

    def _parse_relative_time(self, time_str: str) -> str:
        """Converts relative time strings (e.g., '5小时前') to ISO 8601 format.

        Supports the Chinese markers '分钟前' (minutes ago), '小时前' (hours
        ago), '天前' (days ago) and '昨天' (yesterday). Unrecognized or
        unparsable input falls back to the current time.

        Note: returned timestamps are naive local time, consistent with the
        'scraped_at' field produced by scrape_posts().
        """
        now = datetime.now()
        try:
            if '分钟前' in time_str:
                minutes = int(time_str.split('分钟前')[0])
                return (now - timedelta(minutes=minutes)).isoformat()
            elif '小时前' in time_str:
                hours = int(time_str.split('小时前')[0])
                return (now - timedelta(hours=hours)).isoformat()
            elif '天前' in time_str:
                days = int(time_str.split('天前')[0])
                return (now - timedelta(days=days)).isoformat()
            elif '昨天' in time_str:
                return (now - timedelta(days=1)).isoformat()
        except (ValueError, IndexError):
            logger.warning(f"Could not parse relative time '{time_str}'. Defaulting to now.")

        # Default to now if parsing fails or format is not recognized
        return now.isoformat()

    def scrape_posts(self) -> List[Dict[str, Any]]:
        """Scrapes post data from the target Binance Square profile.

        Returns:
            A list of dicts with keys title, content, post_url, published_at,
            likes, comments, shares, views, quotes, scraped_at. Returns an
            empty list if the driver is missing or navigation fails. The
            comments/shares/views/quotes counters are placeholders (always 0).
        """
        if not self.driver:
            logger.error("Driver not initialized. Cannot scrape.")
            return []

        logger.info(f"Navigating to {self.target_url}")
        try:
            self.driver.get(self.target_url)
        except (TimeoutException, WebDriverException) as e:
            # Navigation failures (DNS errors, timeouts, crashed renderer)
            # should produce an empty result, not an unhandled traceback.
            logger.error(f"Failed to load {self.target_url}: {e}")
            return []

        # Randomized delay to let dynamically rendered content appear.
        delay = random.uniform(*config.SCRAPING_CONFIG['random_delay_range'])
        logger.info(f"Waiting for {delay:.2f} seconds for page to load...")
        time.sleep(delay)

        posts_data = []
        # NOTE(review): these auto-generated CSS class selectors are fragile
        # and will break whenever Binance ships a new frontend build.
        post_elements = self.driver.find_elements(By.CSS_SELECTOR, 'div.css-18y5vbr')
        logger.info(f"Found {len(post_elements)} post elements on the page.")

        for post in post_elements:
            try:
                title_element = post.find_element(By.CSS_SELECTOR, 'div.css-15s9s1s')
                content_element = post.find_element(By.CSS_SELECTOR, 'div.css-mlcint')
                post_link_element = post.find_element(By.CSS_SELECTOR, 'a.css-1yxx6k1')
                post_url = post_link_element.get_attribute('href')

                # Relative date text such as '5小时前'.
                date_text = post.find_element(By.CSS_SELECTOR, 'div.css-11zjd2y').text
                published_at = self._parse_relative_time(date_text)

                # Likes counter — best-effort, since this selector is a guess
                # and needs verification against the live page.
                likes = 0
                try:
                    likes_text = post.find_element(By.CSS_SELECTOR, "div[data-bn-type='text']").text
                    if 'k' in likes_text.lower():
                        # e.g. '1.2k' -> 1200
                        likes = int(float(likes_text.lower().replace('k', '')) * 1000)
                    elif likes_text.isdigit():
                        likes = int(likes_text)
                except Exception:
                    pass  # Keep likes as 0 if not found

                post_data = {
                    'title': title_element.text,
                    'content': content_element.text,
                    'post_url': post_url,
                    'published_at': published_at,
                    'likes': likes,
                    'comments': 0,  # Placeholder
                    'shares': 0,    # Placeholder
                    'views': 0,     # Placeholder
                    'quotes': 0,    # Placeholder
                    'scraped_at': datetime.now().isoformat()
                }
                posts_data.append(post_data)
            except Exception as e:
                # Skip posts whose layout doesn't match; keep the rest.
                logger.error(f"Error parsing a post element: {e}", exc_info=True)
                continue

        return posts_data

    def upload_to_supabase(self, data: List[Dict[str, Any]]):
        """Uploads scraped data to the Supabase table.

        Uses an upsert keyed on 'post_url' so re-scraped posts update in
        place instead of duplicating. Failures are logged, never raised.
        """
        if not data:
            logger.info("No new data scraped, nothing to upload.")
            return
        if not self.supabase_client:
            logger.error("Supabase client not available. Cannot upload.")
            return

        logger.info(f"Attempting to upsert {len(data)} records to Supabase...")
        try:
            table_name = config.DATABASE_CONFIG['table_name']
            response = self.supabase_client.table(table_name).upsert(
                data,
                on_conflict='post_url'  # Assumes 'post_url' is a unique key
            ).execute()

            # The V2 API returns data in a different structure
            if hasattr(response, 'data') and response.data:
                logger.info(f"Successfully upserted {len(response.data)} records.")
            else:
                logger.info("Upsert operation completed, but no data returned in response. Check Supabase for results.")

        except Exception as e:
            logger.error(f"Failed to upload data to Supabase: {e}")

    def run(self):
        """Main execution flow: setup, scrape, upload, always clean up."""
        logger.info("Starting monitor process...")
        try:
            self._setup_driver()
            scraped_data = self.scrape_posts()
            self.upload_to_supabase(scraped_data)
        except Exception as e:
            logger.error(f"An error occurred during the main execution: {e}", exc_info=True)
        finally:
            # Quit the browser even on failure to avoid leaking processes.
            if self.driver:
                self.driver.quit()
                logger.info("WebDriver closed.")
        logger.info("Monitor process finished.")

def main():
    """Entry point of the script: build a monitor and run one cycle."""
    BinanceMonitor().run()


if __name__ == "__main__":
    main()
