from abc import ABC, abstractmethod
from typing import Dict, List, Any, Optional, Union
import time
import random
import logging
from config import Config

# NOTE(review): calling basicConfig() at import time configures the *root*
# logger as a module side effect — fine for a standalone script, but
# surprising when this module is imported as a library (applications
# conventionally own logging configuration themselves).
logging.basicConfig(level=logging.INFO)
# Module-level logger named after this module, per logging convention.
logger = logging.getLogger(__name__)

class CrawlerBase(ABC):
    """Abstract base class for web crawlers.

    Provides shared plumbing — user-agent rotation, round-robin proxy
    selection, randomized politeness delays, retry logic, and HTML parsing
    helpers — while leaving actual page fetching to concrete subclasses via
    :meth:`get_page` and :meth:`close`.
    """

    def __init__(self, config: Optional["Config"] = None):
        """Initialize crawler state from *config* (a fresh ``Config()`` if omitted).

        Args:
            config: Optional project Config; settings are read via its
                dotted-key ``get(key, default)`` accessor.
        """
        self.config = config or Config()
        self.session = None  # concrete subclasses create/own the HTTP session
        self.proxies: List[Dict[str, str]] = []
        self.current_proxy_index: int = 0
        self.user_agents: List[str] = self.config.get('request_settings.user_agents', [])
        self.retry_times: int = self.config.get('request_settings.retry_times', 3)
        # [min_seconds, max_seconds] bounds for the politeness delay.
        self.delay_range = self.config.get('request_settings.delay_range', [1, 3])

    @abstractmethod
    def get_page(self, url: str, **kwargs) -> Optional[str]:
        """Fetch *url* and return the page body, or ``None`` on failure."""
        pass

    @abstractmethod
    def close(self):
        """Release any resources (sessions, drivers, sockets) held by the crawler."""
        pass

    def get_random_user_agent(self) -> str:
        """Return a random configured user agent, or a sensible default if none."""
        if self.user_agents:
            return random.choice(self.user_agents)
        return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"

    def get_next_proxy(self) -> Optional[Dict[str, str]]:
        """Return the next proxy in round-robin order, or ``None`` if none configured."""
        if not self.proxies:
            return None
        proxy = self.proxies[self.current_proxy_index]
        self.current_proxy_index = (self.current_proxy_index + 1) % len(self.proxies)
        return proxy

    def add_delay(self):
        """Sleep a random interval within ``delay_range`` (no-op if unset/empty)."""
        if self.delay_range:
            time.sleep(random.uniform(self.delay_range[0], self.delay_range[1]))

    def retry_request(self, func, *args, **kwargs):
        """Call *func* up to ``retry_times`` times and return its first non-None result.

        Fix: the delay is now inserted between attempts regardless of whether
        the previous attempt raised or merely returned ``None`` — the original
        only delayed after exceptions, so ``None`` results retried with no
        backoff at all.

        Returns:
            The first non-None result of ``func(*args, **kwargs)``, or ``None``
            if every attempt raised or returned ``None``.
        """
        last_exception = None
        for attempt in range(self.retry_times):
            try:
                result = func(*args, **kwargs)
                if result is not None:
                    return result
                # A None result is a soft failure worth retrying (and logging).
                logger.warning("Attempt %d returned no result", attempt + 1)
            except Exception as e:
                last_exception = e
                logger.warning("Attempt %d failed: %s", attempt + 1, e)
            if attempt < self.retry_times - 1:
                self.add_delay()  # back off before every retry, not only after exceptions
        logger.error("All %d attempts failed. Last error: %s",
                     self.retry_times, last_exception)
        return None

    def extract_links(self, content: str, base_url: str = "") -> List[str]:
        """Extract unique hyperlinks from HTML *content*.

        Absolute http(s) URLs are kept as-is; relative hrefs are resolved
        against *base_url* when one is given, and dropped otherwise.

        Returns:
            De-duplicated links in first-seen order (the original returned
            ``list(set(...))``, whose order was nondeterministic).
        """
        from bs4 import BeautifulSoup
        from urllib.parse import urljoin

        soup = BeautifulSoup(content, 'html.parser')
        links = []
        for link in soup.find_all('a', href=True):
            href = link['href']
            # Check full scheme prefixes: a bare startswith('http') would
            # misclassify relative paths like "httpdocs/x" as absolute URLs.
            if href.startswith(('http://', 'https://')):
                links.append(href)
            elif base_url:
                links.append(urljoin(base_url, href))
        # dict.fromkeys de-duplicates while preserving insertion order.
        return list(dict.fromkeys(links))

    def parse_page(self, content: str, selectors: Dict[str, str]) -> Dict[str, Any]:
        """Parse *content* with the given selectors and return extracted text.

        Args:
            content: HTML document to parse.
            selectors: Mapping of result key -> selector string. A ``"css:"``
                prefix is stripped before selecting; ``"xpath:"`` selectors
                are unsupported with BeautifulSoup and yield a fixed notice
                string; anything else is treated as a bare CSS selector.

        Returns:
            Mapping of key -> stripped text: a ``str`` for a single match, a
            ``list`` of ``str`` for multiple matches, and ``None`` for no
            match or a selector error.
        """
        from bs4 import BeautifulSoup

        soup = BeautifulSoup(content, 'html.parser')
        result: Dict[str, Any] = {}

        for key, selector in selectors.items():
            try:
                if selector.startswith('css:'):
                    elements = soup.select(selector[4:])
                elif selector.startswith('xpath:'):
                    # BeautifulSoup has no XPath engine; lxml would be required.
                    result[key] = "XPath not supported with BeautifulSoup"
                    continue
                else:
                    elements = soup.select(selector)

                if elements:
                    if len(elements) == 1:
                        result[key] = elements[0].get_text(strip=True)
                    else:
                        result[key] = [elem.get_text(strip=True) for elem in elements]
                else:
                    result[key] = None
            except Exception as e:
                logger.error("Error parsing selector '%s': %s", selector, e)
                result[key] = None

        return result