from typing import Dict, List, Optional, Set
from urllib.parse import parse_qs, urlencode, urljoin, urlparse, urlunparse


def normalize_url(url: str, 
                 remove_query: bool = False, 
                 remove_fragment: bool = True,
                 remove_default_port: bool = True,
                 sort_query_params: bool = True,
                 remove_query_params: Optional[List[str]] = None) -> str:
    """Normalize a URL to a standard format.
    
    Lowercases the scheme and host, optionally strips default ports,
    sorts/filters query parameters, and drops the fragment. An empty
    path is normalized to "/" so a bare domain renders as "https://host/".
    
    Args:
        url: The URL to normalize.
        remove_query: Whether to remove the query component.
        remove_fragment: Whether to remove the fragment component.
        remove_default_port: Whether to remove default ports (80 for HTTP, 443 for HTTPS).
        sort_query_params: Whether to sort query parameters alphabetically.
        remove_query_params: List of query parameters to remove.
        
    Returns:
        Normalized URL, or "" for empty input.
    """
    if not url:
        return ""

    parsed = urlparse(url)

    # Scheme and host are case-insensitive (RFC 3986), but userinfo
    # ("user:password@") is not — lowercase only the host portion.
    scheme = parsed.scheme.lower()
    userinfo, sep, host = parsed.netloc.rpartition("@")
    netloc = userinfo + sep + host.lower()

    # Drop the scheme's default port (http:80, https:443).
    if remove_default_port:
        if (scheme == "http" and netloc.endswith(":80")) or \
           (scheme == "https" and netloc.endswith(":443")):
            netloc = netloc.rsplit(":", 1)[0]

    # Handle query parameters.
    query = "" if remove_query else parsed.query
    if query and (sort_query_params or remove_query_params):
        # keep_blank_values=True so parameters like "a=" are not
        # silently dropped during the round-trip through parse_qs.
        query_params = parse_qs(query, keep_blank_values=True)

        # Remove specified query parameters (missing keys are a no-op).
        if remove_query_params:
            for param in remove_query_params:
                query_params.pop(param, None)

        # Rebuild the query string, expanding repeated keys, optionally sorted.
        items = sorted(query_params.items()) if sort_query_params else query_params.items()
        query = urlencode([(k, v) for k, vs in items for v in vs])

    fragment = "" if remove_fragment else parsed.fragment

    # Use "/" for an empty path. (Appending "/" to the fully built URL —
    # the previous approach — corrupted URLs that carry a query or
    # fragment, e.g. producing "https://host?x=1/".)
    path = parsed.path or "/"

    return urlunparse((scheme, netloc, path, parsed.params, query, fragment))


def extract_domain(url: str, include_subdomain: bool = True) -> str:
    """Extract the domain from a URL.
    
    Args:
        url: The URL to extract the domain from.
        include_subdomain: Whether to include subdomains in the result.
        
    Returns:
        The domain extracted from the URL (lowercased, without userinfo
        or port), or "" if none can be determined.
    """
    if not url:
        return ""

    # hostname is already lowercased and excludes userinfo and port;
    # the raw netloc would leak "user@host:8080" into the result.
    host = urlparse(url).hostname or ""

    if include_subdomain:
        return host

    # Extract only the registered domain (without subdomains).
    parts = host.split(".")
    if len(parts) <= 2:
        return host

    # Heuristic for compound ccTLDs like .co.uk / .com.au: two short
    # trailing labels are treated as one suffix. Not exhaustive — a real
    # implementation would consult the Public Suffix List.
    if len(parts[-2]) <= 3 and len(parts[-1]) <= 3 and len(parts) > 3:
        return ".".join(parts[-3:])
    return ".".join(parts[-2:])


def is_same_domain(url1: str, url2: str, include_subdomain: bool = True) -> bool:
    """Check if two URLs belong to the same domain.
    
    Args:
        url1: The first URL.
        url2: The second URL.
        include_subdomain: Whether to compare subdomains.
        
    Returns:
        True if both URLs belong to the same non-empty domain,
        False otherwise.
    """
    domain1 = extract_domain(url1, include_subdomain)
    domain2 = extract_domain(url2, include_subdomain)

    # Two empty/unparseable URLs both yield "" — treat that as *not*
    # the same domain rather than reporting a spurious match.
    if not domain1 or not domain2:
        return False

    return domain1 == domain2


def join_url(base_url: str, relative_url: str) -> str:
    """Join a base URL and a relative URL.
    
    Thin wrapper around urllib.parse.urljoin, which resolves the
    relative reference against the base per RFC 3986 (absolute
    relative_urls replace the base entirely).
    
    Args:
        base_url: The base URL.
        relative_url: The relative URL to join.
        
    Returns:
        The joined URL.
    """
    return urljoin(base_url, relative_url)


def extract_urls_from_text(text: str) -> Set[str]:
    """Extract HTTP/HTTPS URLs from free-form text.
    
    Args:
        text: The text to extract URLs from.
        
    Returns:
        Set of extracted URLs, with trailing sentence punctuation removed.
    """
    import re

    # Host: word chars, dots, hyphens, or percent-escapes; then any run
    # of common path/query/fragment characters.
    url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\-.~:/?#[\]@!$&\'()*+,;=]*'

    # Strip trailing sentence punctuation: in text like
    # "See https://a.com/x." the pattern would otherwise capture the
    # final period as part of the URL.
    return {match.rstrip('.,;:!?') for match in re.findall(url_pattern, text)}