import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from typing import Dict, List, Any, Optional
import time
import logging

logger = logging.getLogger(__name__)

class WebScraperService:
    """Web scraping service.

    Supports static scraping via requests + BeautifulSoup and dynamic
    scraping via Selenium, plus heuristic extraction of visa-related
    information from scraped pages. Can be used as a context manager to
    guarantee the Selenium driver is released.
    """

    def __init__(self, use_selenium: bool = False, headless: bool = True):
        """
        Initialize the scraper service.

        Args:
            use_selenium: Whether to use Selenium for dynamic pages.
            headless: Run the browser in headless mode (Selenium only).
        """
        self.use_selenium = use_selenium
        self.headless = headless
        # Set before _setup_selenium so close()/__del__ are always safe,
        # even if driver creation fails.
        self.driver = None

        if use_selenium:
            self._setup_selenium()

    def __enter__(self):
        """Support `with WebScraperService(...) as svc:` usage."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Always release the WebDriver on scope exit; never swallow exceptions."""
        self.close()
        return False

    def _setup_selenium(self):
        """Create the Chrome WebDriver; on failure self.driver stays None."""
        try:
            chrome_options = Options()
            if self.headless:
                chrome_options.add_argument("--headless")
            # Flags commonly required for containerized / CI Chrome.
            chrome_options.add_argument("--no-sandbox")
            chrome_options.add_argument("--disable-dev-shm-usage")
            chrome_options.add_argument("--disable-gpu")
            chrome_options.add_argument("--window-size=1920,1080")

            self.driver = webdriver.Chrome(options=chrome_options)
            logger.info("Selenium WebDriver initialized successfully")
        except Exception as e:
            # Degrade gracefully: scrape_with_selenium reports the missing driver.
            logger.error(f"Error setting up Selenium: {e}")
            self.driver = None

    async def scrape_with_requests(self, url: str, headers: Optional[Dict] = None) -> Dict[str, Any]:
        """
        Scrape a page with requests and BeautifulSoup.

        NOTE(review): requests.get is a blocking call inside an async def and
        will stall the event loop; consider off-loading to a thread if other
        coroutines run concurrently.

        Args:
            url: Target URL.
            headers: Extra request headers, merged over the defaults.

        Returns:
            Dict: on success — success flag, status code, title, page text,
            raw HTML, links and image sources; on failure — success=False
            plus an error message. Never raises.
        """
        try:
            default_headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
            }

            if headers:
                default_headers.update(headers)

            response = requests.get(url, headers=default_headers, timeout=30)
            response.raise_for_status()

            soup = BeautifulSoup(response.content, 'html.parser')

            return {
                "success": True,
                "url": url,
                "status_code": response.status_code,
                "title": soup.title.string if soup.title else None,
                "content": soup.get_text().strip(),
                "html": str(soup),
                "links": [link.get('href') for link in soup.find_all('a', href=True)],
                "images": [img.get('src') for img in soup.find_all('img', src=True)]
            }

        except Exception as e:
            logger.error(f"Error scraping {url}: {e}")
            return {
                "success": False,
                "url": url,
                "error": str(e)
            }

    async def scrape_with_selenium(self, url: str, wait_time: int = 10) -> Dict[str, Any]:
        """
        Scrape a dynamic page with Selenium.

        Args:
            url: Target URL.
            wait_time: Max seconds to wait for the page <body> to appear.

        Returns:
            Dict: on success — title, page text, raw HTML, final URL, links
            and image sources; on failure (including a missing driver) —
            success=False plus an error message. Never raises.
        """
        if not self.driver:
            return {
                "success": False,
                "url": url,
                "error": "Selenium driver not initialized"
            }

        try:
            self.driver.get(url)

            # Wait until the body element exists, i.e. the page has loaded.
            WebDriverWait(self.driver, wait_time).until(
                EC.presence_of_element_located((By.TAG_NAME, "body"))
            )

            page_source = self.driver.page_source
            soup = BeautifulSoup(page_source, 'html.parser')

            return {
                "success": True,
                "url": url,
                "title": self.driver.title,
                "content": soup.get_text().strip(),
                "html": page_source,
                # current_url may differ from url after redirects.
                "current_url": self.driver.current_url,
                "links": [link.get('href') for link in soup.find_all('a', href=True)],
                "images": [img.get('src') for img in soup.find_all('img', src=True)]
            }

        except Exception as e:
            logger.error(f"Error scraping {url} with Selenium: {e}")
            return {
                "success": False,
                "url": url,
                "error": str(e)
            }

    async def extract_visa_info(self, url: str) -> Dict[str, Any]:
        """
        Extract visa-related information from a page.

        Args:
            url: URL of a visa-information page.

        Returns:
            Dict: {"success": True, "visa_info": {...}} with heuristically
            extracted processing time, requirements, fees and appointment
            snippets, or {"success": False, "error": ...} on failure.
        """
        scrape_method = self.scrape_with_selenium if self.use_selenium else self.scrape_with_requests
        result = await scrape_method(url)

        if not result.get("success"):
            return result

        try:
            soup = BeautifulSoup(result["html"], 'html.parser')

            visa_info = {
                "url": url,
                "title": result.get("title"),
                "processing_time": self._extract_processing_time(soup),
                "requirements": self._extract_requirements(soup),
                "fees": self._extract_fees(soup),
                "appointments": self._extract_appointments(soup),
                "last_updated": time.strftime("%Y-%m-%d %H:%M:%S")
            }

            return {
                "success": True,
                "visa_info": visa_info
            }

        except Exception as e:
            logger.error(f"Error extracting visa info from {url}: {e}")
            return {
                "success": False,
                "url": url,
                "error": str(e)
            }

    @staticmethod
    def _find_first_text(soup: BeautifulSoup, keywords: List[str]) -> Optional[str]:
        """Return the first stripped text node containing any keyword (case-insensitive).

        Shared by the single-value extractors below; uses the `string=`
        keyword (the `text=` alias is deprecated in BeautifulSoup 4.4+).
        """
        for keyword in keywords:
            kw = keyword.lower()
            element = soup.find(string=lambda text: text and kw in text.lower())
            if element:
                return element.strip()
        return None

    def _extract_processing_time(self, soup: BeautifulSoup) -> Optional[str]:
        """Extract a processing-time snippet, or None if not found."""
        return self._find_first_text(
            soup, ["processing time", "处理时间", "审理时间", "wait time"]
        )

    def _extract_requirements(self, soup: BeautifulSoup) -> List[str]:
        """Extract visa-requirement snippets, deduplicated with order preserved."""
        requirements = []
        req_keywords = ["requirement", "要求", "材料", "document"]

        for keyword in req_keywords:
            kw = keyword.lower()
            elements = soup.find_all(string=lambda text: text and kw in text.lower())
            for element in elements[:5]:  # cap matches per keyword to limit noise
                stripped = element.strip()
                if stripped:
                    requirements.append(stripped)

        # dict.fromkeys dedupes while keeping first-seen order;
        # list(set(...)) made the result order nondeterministic.
        return list(dict.fromkeys(requirements))

    def _extract_fees(self, soup: BeautifulSoup) -> Optional[str]:
        """Extract a fee snippet, or None if not found."""
        return self._find_first_text(
            soup, ["fee", "cost", "price", "费用", "价格"]
        )

    def _extract_appointments(self, soup: BeautifulSoup) -> Optional[str]:
        """Extract an appointment/booking snippet, or None if not found."""
        return self._find_first_text(
            soup, ["appointment", "预约", "booking", "schedule"]
        )

    def close(self):
        """Quit the Selenium WebDriver. Idempotent: safe to call repeatedly."""
        if self.driver:
            self.driver.quit()
            # Reset so a second close()/__del__ doesn't quit a dead driver.
            self.driver = None
            logger.info("Selenium WebDriver closed")

    def __del__(self):
        """Best-effort cleanup; must never raise during interpreter shutdown."""
        try:
            self.close()
        except Exception:
            # Logging/driver internals may already be torn down at exit.
            pass
