import json
from typing import Any, Dict, List, Union, Optional, Callable
from bs4 import BeautifulSoup
import re


class HTMLParser:
    """Parser for HTML content backed by BeautifulSoup."""

    def __init__(self, parser: str = "html.parser"):
        """Initialize the HTML parser.

        Args:
            parser: The parser backend to use with BeautifulSoup
                (e.g. "html.parser", "lxml", "html5lib").
        """
        self.parser_type = parser

    def parse(self, content: Union[str, bytes], encoding: str = "utf-8") -> BeautifulSoup:
        """Parse HTML content into a BeautifulSoup object.

        Args:
            content: HTML content to parse; bytes are decoded first.
            encoding: Character encoding used to decode bytes input.
                Undecodable bytes are replaced rather than raising.

        Returns:
            BeautifulSoup object.
        """
        if isinstance(content, bytes):
            content = content.decode(encoding, errors="replace")
        return BeautifulSoup(content, self.parser_type)

    def extract_links(self, soup: BeautifulSoup, base_url: Optional[str] = None,
                      filter_by: Optional[Dict[str, Any]] = None) -> List[str]:
        """Extract hyperlink targets from anchor tags.

        Args:
            soup: BeautifulSoup object.
            base_url: Base URL used to resolve relative links; when omitted,
                relative links are returned as-is.
            filter_by: Attribute filters passed to ``find_all`` to narrow
                which anchors are considered.

        Returns:
            List of extracted links. ``javascript:`` pseudo-links, empty
            hrefs, and same-page fragment links are skipped.
        """
        # Hoisted out of the loop; previously re-imported per anchor.
        from urllib.parse import urljoin

        links = []
        for anchor in soup.find_all("a", attrs=filter_by or {}):
            if "href" not in anchor.attrs:
                continue
            href = anchor["href"]

            # Skip javascript: links, fragment-only links, or empty links.
            if not href or href.startswith(("javascript:", "#")):
                continue

            if base_url and not href.startswith(("http://", "https://")):
                # urljoin correctly resolves root-relative ("/path"),
                # protocol-relative ("//host/path"), and plain relative
                # paths. The previous manual handling of hrefs starting
                # with "/" broke protocol-relative URLs by prefixing
                # "scheme://netloc" onto "//host/path".
                href = urljoin(base_url, href)

            links.append(href)

        return links

    def extract_text(self, soup: BeautifulSoup, selector: Optional[str] = None) -> str:
        """Extract text from the HTML content.

        Args:
            soup: BeautifulSoup object.
            selector: Optional CSS selector restricting extraction to the
                matching elements; when None, all document text is used.

        Returns:
            Extracted text with runs of whitespace collapsed to single spaces.
        """
        if selector:
            elements = soup.select(selector)
            result = " ".join([elem.get_text(strip=True) for elem in elements])
        else:
            result = soup.get_text(separator=" ", strip=True)

        # Clean up text: collapse all whitespace runs into single spaces.
        return re.sub(r'\s+', ' ', result).strip()

    def extract_elements(self, soup: BeautifulSoup, selector: str) -> List[Any]:
        """Extract elements from the HTML content using a CSS selector.

        Args:
            soup: BeautifulSoup object.
            selector: CSS selector to extract elements.

        Returns:
            List of matching elements (BeautifulSoup Tag objects).
        """
        return soup.select(selector)

    def extract_tables(self, soup: BeautifulSoup) -> List[List[List[str]]]:
        """Extract tables from the HTML content.

        Args:
            soup: BeautifulSoup object.

        Returns:
            List of tables, where each table is a list of rows, and each
            row is a list of cell strings (header and data cells alike).
        """
        tables = []
        for table in soup.find_all("table"):
            table_data = []
            for row in table.find_all("tr"):
                # Treat header (th) and data (td) cells uniformly.
                cells = row.find_all(["th", "td"])
                table_data.append([cell.get_text(strip=True) for cell in cells])
            tables.append(table_data)

        return tables

    def extract_attributes(self, soup: BeautifulSoup, selector: str, attribute: str) -> List[str]:
        """Extract attribute values from elements matching a CSS selector.

        Args:
            soup: BeautifulSoup object.
            selector: CSS selector to select elements.
            attribute: Attribute name to extract.

        Returns:
            List of attribute values; elements lacking the attribute are
            skipped rather than contributing None.
        """
        elements = soup.select(selector)
        return [elem.get(attribute) for elem in elements if elem.has_attr(attribute)]

    def extract_images(self, soup: BeautifulSoup, base_url: Optional[str] = None) -> List[Dict[str, str]]:
        """Extract image information from the HTML content.

        Args:
            soup: BeautifulSoup object.
            base_url: Base URL to resolve relative image URLs.

        Returns:
            List of dicts with "src" (always present) and, when available,
            "alt" and "title" keys. Images without a src are skipped.
        """
        # Hoisted out of the loop; previously re-imported per image.
        from urllib.parse import urljoin

        images = []
        for img in soup.find_all("img"):
            if not img.has_attr("src"):
                continue  # Skip images without src

            src = img["src"]
            # Resolve relative URLs if base_url is provided.
            if base_url and not src.startswith(("http://", "https://")):
                src = urljoin(base_url, src)

            image_info = {"src": src}
            if img.has_attr("alt"):
                image_info["alt"] = img["alt"]
            if img.has_attr("title"):
                image_info["title"] = img["title"]

            images.append(image_info)

        return images

    def extract_metadata(self, soup: BeautifulSoup) -> Dict[str, str]:
        """Extract document metadata (title and meta tags).

        Args:
            soup: BeautifulSoup object.

        Returns:
            Dictionary mapping meta tag name/property to content, plus the
            document title under "title" when present. Later meta tags with
            a duplicate name overwrite earlier ones.
        """
        metadata = {}

        # Extract title.
        title_tag = soup.find("title")
        if title_tag:
            metadata["title"] = title_tag.get_text(strip=True)

        # Extract both name-based and property-based (e.g. OpenGraph) meta tags.
        for meta in soup.find_all("meta"):
            if meta.has_attr("name") and meta.has_attr("content"):
                metadata[meta["name"]] = meta["content"]
            elif meta.has_attr("property") and meta.has_attr("content"):
                metadata[meta["property"]] = meta["content"]

        return metadata


class JSONParser:
    """Parser and extraction helpers for JSON content."""

    def parse(self, content: Union[str, bytes, Dict, List], encoding: str = "utf-8") -> Any:
        """Parse JSON content into a Python object.

        Args:
            content: JSON content to parse; already-parsed dicts/lists are
                returned unchanged.
            encoding: Character encoding used to decode bytes input.
                Undecodable bytes are replaced rather than raising.

        Returns:
            Parsed JSON object.

        Raises:
            ValueError: If the content is not valid JSON (the original
                JSONDecodeError is attached as the cause).
        """
        if isinstance(content, bytes):
            content = content.decode(encoding, errors="replace")

        # Pass through content that has already been parsed.
        if isinstance(content, (dict, list)):
            return content

        try:
            return json.loads(content)
        except json.JSONDecodeError as e:
            # Chain the decode error so position/context information is
            # preserved for debugging (raise ... from e).
            raise ValueError(f"Invalid JSON content: {str(e)}") from e

    def extract(self, data: Any, path: str) -> Any:
        """Extract a value from a JSON object using a dot notation path.

        Args:
            data: Parsed JSON object.
            path: Dot notation path to the value (e.g., "data.items.0.name").
                Purely numeric segments index into lists.

        Returns:
            Extracted value, or None if any path segment fails to resolve
            (missing key, out-of-range index, or non-container node).
        """
        if not path:
            return data

        result = data
        for part in path.split("."):
            if isinstance(result, dict) and part in result:
                result = result[part]
            elif isinstance(result, list) and part.isdigit() and int(part) < len(result):
                result = result[int(part)]
            else:
                # Path does not resolve; mirror dict.get-style behavior.
                return None

        return result

    def transform(self, data: Any, transform_func: Callable[[Any], Any]) -> Any:
        """Transform a JSON object using a transformation function.

        Args:
            data: Parsed JSON object.
            transform_func: Function applied to the data.

        Returns:
            Whatever ``transform_func`` returns for ``data``.
        """
        return transform_func(data)

    def flatten(self, data: Dict[str, Any], separator: str = "_") -> Dict[str, Any]:
        """Flatten a nested JSON object into a single-level dictionary.

        Nested dict keys and list indices are joined with ``separator``.
        Note: empty dicts/lists contribute no entries to the output.

        Args:
            data: Nested JSON object.
            separator: Separator for nested keys.

        Returns:
            Flattened dictionary mapping joined key paths to leaf values.
        """
        result: Dict[str, Any] = {}

        def _flatten(obj: Any, prefix: str = "") -> None:
            # Recursively walk containers, accumulating the key path.
            if isinstance(obj, dict):
                for key, value in obj.items():
                    _flatten(value, f"{prefix}{separator}{key}" if prefix else key)
            elif isinstance(obj, list):
                for i, item in enumerate(obj):
                    _flatten(item, f"{prefix}{separator}{i}" if prefix else str(i))
            else:
                result[prefix] = obj

        _flatten(data)
        return result