from typing import Dict, List, Optional, Any, Union
from enum import Enum
from pydantic import BaseModel, Field, HttpUrl


class SelectorType(str, Enum):
    """Enumerates the supported selector syntaxes for content extraction.

    Inherits from ``str`` so members compare equal to their literal
    values (e.g. ``SelectorType.CSS == "css"``) and serialize as plain
    strings.
    """

    CSS = "css"      # CSS selector syntax
    XPATH = "xpath"  # XPath expression
    REGEX = "regex"  # regular-expression pattern


class ElementSelector(BaseModel):
    """Describes how to pull one piece of data out of fetched content.

    Pairs a selector expression with extraction options: which selector
    syntax to interpret it with, whether to read an attribute instead of
    text content, and whether to collect every match or just the first.
    """

    # Key under which the extracted value(s) are reported.
    name: str = Field(..., description="Name for the extracted data")
    # Syntax that `selector` is written in; CSS unless stated otherwise.
    selector_type: SelectorType = Field(SelectorType.CSS, description="Type of selector")
    # The selector expression itself.
    selector: str = Field(..., description="Selector string")
    # When set, read this attribute; when None, take the element's text.
    attribute: Union[str, None] = Field(None, description="Attribute to extract (leave empty for text content)")
    # True -> extract all matching elements; False -> only the first.
    multiple: bool = Field(False, description="Whether to extract multiple elements")


class CrawlRequest(BaseModel):
    """Parameters controlling a single crawl job.

    Bundles the HTTP request settings (method, headers, query params,
    body, cookies, redirects, timeout) with crawl behaviour: what to
    extract, whether to follow links, how deep to go, whether to stay
    on the starting domain, and whether to persist responses to disk.
    """

    # Target page; validated as a well-formed URL by pydantic.
    url: HttpUrl = Field(..., description="URL to crawl")
    # HTTP verb; plain GET unless the caller says otherwise.
    method: str = Field("GET", description="HTTP method to use")
    headers: Union[Dict[str, str], None] = Field(None, description="HTTP headers to include")
    params: Union[Dict[str, Any], None] = Field(None, description="Query parameters to include")
    # Request body; shape is up to the caller (form dict, bytes, ...).
    data: Optional[Any] = Field(None, description="Data to send with the request")
    cookies: Union[Dict[str, str], None] = Field(None, description="Cookies to include")
    allow_redirects: bool = Field(True, description="Whether to follow redirects")
    # None means no explicit timeout is applied.
    timeout: Union[int, None] = Field(None, description="Request timeout in seconds")
    # Extraction rules applied to each fetched response.
    selectors: Union[List[ElementSelector], None] = Field(None, description="Elements to extract from the response")
    follow_links: bool = Field(False, description="Whether to follow links in the response")
    # Only consulted when follow_links is enabled.
    max_depth: int = Field(1, description="Maximum depth for following links")
    same_domain_only: bool = Field(True, description="Whether to only follow links to the same domain")
    save_to_disk: bool = Field(False, description="Whether to save the response to disk")


class CrawlResult(BaseModel):
    """Outcome of fetching a single URL.

    Carries the response metadata (status, headers, content type,
    encoding), any data pulled out by the configured selectors, the
    links discovered on the page, and timing/error information.
    """

    url: str = Field(..., description="URL that was crawled")
    # None when the request never produced a response (e.g. on error).
    status_code: Union[int, None] = Field(None, description="HTTP status code")
    headers: Union[Dict[str, str], None] = Field(None, description="Response headers")
    content_type: Union[str, None] = Field(None, description="Content type of the response")
    encoding: Union[str, None] = Field(None, description="Encoding of the response")
    # default_factory so each instance gets its own dict, never a shared one.
    extracted_data: Dict[str, Any] = Field(default_factory=dict, description="Data extracted from the response")
    links: Union[List[str], None] = Field(None, description="Links found in the response")
    # Populated only when the crawl of this URL failed.
    error: Union[str, None] = Field(None, description="Error message if the crawl failed")
    elapsed: Union[float, None] = Field(None, description="Time taken to crawl in seconds")


class CrawlResponse(BaseModel):
    """Aggregate outcome of a crawl request.

    Collects the per-URL results (one entry per page when links are
    followed) together with overall success/error status, total timing,
    and crawled/failed counters.
    """

    # One CrawlResult per URL visited; empty until the crawl runs.
    results: List[CrawlResult] = Field(default_factory=list, description="Results of the crawl")
    initial_url: str = Field(..., description="URL that was initially requested")
    success: bool = Field(True, description="Whether the crawl was successful")
    # Populated only when the crawl as a whole failed.
    error: Union[str, None] = Field(None, description="Error message if the crawl failed")
    total_elapsed: Union[float, None] = Field(None, description="Total time taken to crawl in seconds")
    urls_crawled: int = Field(0, description="Number of URLs crawled")
    urls_failed: int = Field(0, description="Number of URLs that failed to crawl")