import hashlib
import json
import logging
import re
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, Dict, List, Optional, Union

import scrapy
import structlog
from tenacity import retry, stop_after_attempt, wait_exponential


class BaseSpider(scrapy.Spider, ABC):
    """Abstract base class for project spiders.

    Provides structured logging, per-run statistics, item validation and a
    set of common extraction helpers.  Concrete spiders must implement
    :meth:`fetch`, :meth:`parse` and :meth:`store`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # BUGFIX: ``scrapy.Spider.logger`` is a read-only @property, so the
        # original ``self.logger = ...`` raised AttributeError on every
        # instantiation.  The structlog logger now lives under its own
        # attribute; Scrapy's stdlib-based ``self.logger`` remains available.
        self.slog = structlog.get_logger(self.__class__.__name__)
        # start_time is naive local time; it is only used for a same-clock
        # subtraction in get_stats(), so that is safe.
        self.stats: Dict[str, Any] = {
            "items_scraped": 0,
            "items_failed": 0,
            "requests_made": 0,
            "start_time": datetime.now(),
        }

    @abstractmethod
    def fetch(self, url: str, **kwargs) -> scrapy.Request:
        """Build the :class:`scrapy.Request` used to fetch *url*."""

    @abstractmethod
    def parse(self, response: scrapy.http.Response) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
        """Extract one item (or a list of items) from *response*."""

    @abstractmethod
    def store(self, item: Dict[str, Any]) -> bool:
        """Persist *item*; return True on success."""

    def generate_url_hash(self, url: str) -> str:
        """Return a stable hex digest identifying *url*.

        MD5 is acceptable here: the hash is a dedup/identity key, not a
        security credential.
        """
        return hashlib.md5(url.encode()).hexdigest()

    def extract_metadata(self, response: scrapy.http.Response) -> Dict[str, Any]:
        """Collect transport-level metadata shared by every scraped item."""
        return {
            "url": response.url,
            "url_hash": self.generate_url_hash(response.url),
            "status_code": response.status,
            "fetch_time": datetime.now().isoformat(),
            "encoding": response.encoding,
            "headers": dict(response.headers),
        }

    def validate_item(self, item: Dict[str, Any]) -> bool:
        """Return True when *item* carries every required, non-None field.

        Logs a warning and returns False on the first missing field.
        """
        required_fields = ("url", "content", "source", "fetch_time")
        for field in required_fields:
            # .get() covers both "key absent" and "value is None".
            if item.get(field) is None:
                self.slog.warning(
                    "Missing required field",
                    field=field,
                    item_url=item.get("url", "unknown"),
                )
                return False
        return True

    def handle_error(self, failure, request: Optional[scrapy.Request] = None) -> Dict[str, Any]:
        """Record a failed request/parse and return the structured error info.

        *failure* is expected to be a Twisted Failure, but anything
        str()-able is tolerated; *request* is the originating request when
        known.  Invokes an optional per-spider ``error_callback`` hook.
        """
        self.stats["items_failed"] += 1
        error_info = {
            "error_type": failure.type.__name__ if hasattr(failure, 'type') else "Unknown",
            "error_value": str(failure.value) if hasattr(failure, 'value') else str(failure),
            "url": request.url if request else "Unknown",
            "timestamp": datetime.now().isoformat(),
        }

        self.slog.error("Spider error occurred", **error_info)

        # Optional hook so subclasses can route errors elsewhere (alerting,
        # dead-letter queue, ...) without overriding this method.
        if hasattr(self, 'error_callback'):
            self.error_callback(error_info)

        return error_info

    def log_progress(self, message: str, level: str = "info", **kwargs) -> None:
        """Emit a progress event at *level* with current stats attached.

        Unrecognized levels fall back to ``info``.
        """
        log_data = {
            "message": message,
            "spider": self.name,
            "stats": self.stats,
            "timestamp": datetime.now().isoformat(),
            **kwargs,
        }

        # Dispatch by name instead of an if/elif ladder.
        if level in ("debug", "warning", "error"):
            emit = getattr(self.slog, level)
        else:
            emit = self.slog.info
        emit(message, **log_data)

    def safe_extract(self, selector, xpath: str, default: Any = None) -> Any:
        """Return the first *xpath* match from *selector*, or *default*.

        Falsy matches (e.g. an empty string) also fall back to *default*.

        BUGFIX: the original wrapped this method in a tenacity ``@retry``,
        but the broad except below means no exception ever escaped, so the
        retry could never trigger; the dead decorator has been removed.
        """
        try:
            result = selector.xpath(xpath).get()
            return result if result else default
        except Exception as e:
            self.slog.warning(
                "Failed to extract data",
                xpath=xpath,
                error=str(e),
            )
            return default

    def clean_text(self, text: str) -> str:
        """Collapse runs of whitespace and strip control characters."""
        if not text:
            return ""

        text = re.sub(r'\s+', ' ', text).strip()
        # Drop C0/C1 control characters that survive HTML text extraction.
        return re.sub(r'[\x00-\x1F\x7F-\x9F]', '', text)

    def extract_links(self, response: scrapy.http.Response, pattern: Optional[str] = None) -> List[str]:
        """Return absolute, de-duplicated hrefs found in *response*.

        When *pattern* is given, only hrefs whose raw (pre-urljoin) value
        matches the regex from its start are kept.
        """
        hrefs = response.css('a::attr(href)').getall()
        if pattern:
            link_re = re.compile(pattern)
            hrefs = [href for href in hrefs if link_re.match(href)]

        absolute = [response.urljoin(href) for href in hrefs]
        # dict.fromkeys dedupes while preserving first-seen order; the
        # original list(set(...)) returned an arbitrary order.
        return list(dict.fromkeys(absolute))

    def create_item(self, response: scrapy.http.Response, extracted_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Merge metadata and extracted fields into a validated item.

        Keys in *extracted_data* override metadata on collision.  Returns
        None (and counts a failure) when validation fails.
        """
        item = {
            **self.extract_metadata(response),
            **extracted_data,
            "source": self.name,
            "spider_version": getattr(self, 'version', '1.0.0'),
            "crawl_id": getattr(self, 'crawl_id', None),
        }

        if not self.validate_item(item):
            self.stats["items_failed"] += 1
            return None

        self.stats["items_scraped"] += 1
        return item

    def get_stats(self) -> Dict[str, Any]:
        """Return a snapshot of counters plus runtime and success rate."""
        runtime = (datetime.now() - self.stats["start_time"]).total_seconds()
        attempted = self.stats["items_scraped"] + self.stats["items_failed"]
        return {
            **self.stats,
            "runtime_seconds": runtime,
            # max(..., 1) avoids ZeroDivisionError before the first item.
            "success_rate": self.stats["items_scraped"] / max(attempted, 1) * 100,
        }

    def closed(self, reason):
        """Scrapy lifecycle hook: log final statistics at shutdown."""
        self.slog.info("Spider closed", reason=reason, **self.get_stats())