import asyncio
import re
import time
import uuid
from collections import deque
from typing import Any, Dict, Optional

from fastapi import BackgroundTasks, Depends, HTTPException

from app.modules.crawler.config import CrawlerSettings
from app.modules.crawler.schemas import CrawlRequest, CrawlResponse, CrawlResult
from app.modules.crawler.services.crawler import AsyncHTTPCrawler
from app.modules.crawler.services.parser import HTMLParser, JSONParser
from app.modules.crawler.utils.url import is_same_domain


# In-memory storage for background tasks (in a real application, use Redis or a database).
# NOTE(review): this name collides with the ``background_tasks: BackgroundTasks``
# parameter of ``crawl_background`` below, which shadows this dict inside that function.
background_tasks: Dict[str, Dict[str, Any]] = {}


def _apply_json_selectors(json_parser, content, selectors, result) -> None:
    """Apply selectors to a JSON payload (CSS/XPath selectors are skipped)."""
    parsed_content = json_parser.parse(content)
    for selector in selectors:
        # CSS and XPath selectors do not apply to JSON documents.
        if selector.selector_type in ("css", "xpath"):
            continue
        result.extracted_data[selector.name] = json_parser.extract(
            parsed_content, selector.selector
        )


def _apply_html_selectors(html_parser, soup, raw_content, selectors, result) -> None:
    """Extract page metadata and apply CSS/regex selectors to parsed HTML."""
    metadata = html_parser.extract_metadata(soup)
    if metadata:
        result.extracted_data["metadata"] = metadata

    for selector in selectors:
        if selector.selector_type == "css":
            if selector.multiple:
                elements = html_parser.extract_elements(soup, selector.selector)
                if selector.attribute:
                    values = [
                        elem.get(selector.attribute)
                        for elem in elements
                        if elem.has_attr(selector.attribute)
                    ]
                else:
                    values = [elem.get_text(strip=True) for elem in elements]
                result.extracted_data[selector.name] = values
            else:
                element = soup.select_one(selector.selector)
                if element:
                    if selector.attribute:
                        value = element.get(selector.attribute)
                    else:
                        value = element.get_text(strip=True)
                    result.extracted_data[selector.name] = value
        elif selector.selector_type == "xpath":
            # XPath is not supported by BeautifulSoup; would require lxml.
            pass
        elif selector.selector_type == "regex":
            matches = re.findall(selector.selector, str(raw_content))
            if matches:
                result.extracted_data[selector.name] = (
                    matches if selector.multiple else matches[0]
                )


async def crawl(request: CrawlRequest, *, max_urls: int = 100) -> CrawlResponse:
    """Crawl a URL breadth-first (optionally following links) and extract data.

    Args:
        request: Crawl request parameters.
        max_urls: Safety cap on the total number of URLs visited
            (previously a hard-coded constant of 100).

    Returns:
        Response containing one CrawlResult per URL visited, plus
        crawled/failed counters and overall success/error information.
    """
    start_time = time.time()

    response = CrawlResponse(
        initial_url=str(request.url),
        results=[],
        success=True,
        urls_crawled=0,
        urls_failed=0,
    )

    try:
        async with AsyncHTTPCrawler(CrawlerSettings()) as crawler:
            html_parser = HTMLParser()
            json_parser = JSONParser()

            # BFS queue of (url, depth); deque gives O(1) popleft where
            # list.pop(0) was O(n) per visited URL.
            queue = deque([(str(request.url), 0)])
            visited = set()

            while queue and len(visited) < max_urls:
                url, depth = queue.popleft()

                if url in visited:
                    continue
                visited.add(url)

                # Skip URLs beyond the requested depth (the initial URL always runs).
                if depth > request.max_depth and url != str(request.url):
                    continue

                try:
                    # NOTE(review): request.timeout was read but never forwarded
                    # to crawler.fetch in the original code — confirm whether
                    # AsyncHTTPCrawler.fetch accepts a per-request timeout.
                    start_fetch_time = time.time()
                    content = await crawler.fetch(
                        url=url,
                        method=request.method,
                        headers=request.headers,
                        params=request.params,
                        data=request.data,
                        cookies=request.cookies,
                        allow_redirects=request.allow_redirects,
                        save=request.save_to_disk,
                    )
                    elapsed = time.time() - start_fetch_time

                    result = CrawlResult(
                        url=url,
                        status_code=200,  # fetch() does not expose the status; assume success
                        elapsed=elapsed,
                        extracted_data={},
                    )

                    # NOTE(review): content type is hard-coded because fetch()
                    # does not expose response headers, so the JSON branch below
                    # is currently unreachable — confirm against the crawler API.
                    content_type = "text/html"
                    is_json = "json" in content_type.lower()

                    if content:
                        soup = None
                        if request.selectors:
                            if is_json:
                                _apply_json_selectors(
                                    json_parser, content, request.selectors, result
                                )
                            else:
                                soup = html_parser.parse(content)
                                _apply_html_selectors(
                                    html_parser, soup, content, request.selectors, result
                                )

                        # BUG FIX: link following was previously nested under the
                        # selectors check, so follow_links silently did nothing
                        # when no selectors were supplied.
                        if (
                            request.follow_links
                            and depth < request.max_depth
                            and not is_json
                        ):
                            if soup is None:
                                soup = html_parser.parse(content)
                            links = html_parser.extract_links(soup, base_url=url)
                            result.links = links
                            for link in links:
                                # Optionally restrict the crawl to the start domain.
                                if request.same_domain_only and not is_same_domain(link, url):
                                    continue
                                queue.append((link, depth + 1))

                    response.results.append(result)
                    response.urls_crawled += 1

                except Exception as e:
                    # Record the failure but keep crawling the remaining URLs.
                    response.results.append(CrawlResult(url=url, error=str(e)))
                    response.urls_failed += 1

            # Total wall-clock time for the whole crawl.
            response.total_elapsed = time.time() - start_time

    except Exception as e:
        # Setup/teardown failures mark the entire crawl as failed.
        response.success = False
        response.error = str(e)

    return response


async def crawl_background(request: CrawlRequest, background_tasks: BackgroundTasks) -> Dict[str, Any]:
    """Register and schedule a background task that crawls a URL.

    Args:
        request: Crawl request parameters.
        background_tasks: FastAPI background tasks object used to schedule the work.

    Returns:
        Dictionary with the new task ID and its initial (pending) status.
    """
    # Generate task ID
    task_id = str(uuid.uuid4())

    # BUG FIX: the ``background_tasks`` parameter shadows the module-level task
    # store of the same name, so indexing it raised TypeError on the FastAPI
    # BackgroundTasks object. Fetch the module-level dict explicitly.
    task_store = globals()["background_tasks"]

    # Record the task before scheduling so an immediate status poll can find it.
    task_store[task_id] = {
        "status": "pending",
        "created_at": time.time(),
        "request": request.dict(),
        "result": None
    }

    # Schedule the actual crawl on FastAPI's background runner.
    background_tasks.add_task(_run_background_crawl, task_id, request)

    # Return task ID
    return {
        "task_id": task_id,
        "status": "pending",
        "message": "Crawl task started"
    }


async def _run_background_crawl(task_id: str, request: CrawlRequest) -> None:
    """Execute a registered crawl task and record its outcome.

    Moves the module-level ``background_tasks`` entry through
    running -> completed (with the serialized result) or failed
    (with the error message), stamping the completion time.

    Args:
        task_id: Identifier of the task entry to update.
        request: Crawl request parameters.
    """
    try:
        # Mark the task as in progress, then perform the crawl itself.
        background_tasks[task_id]["status"] = "running"
        crawl_response = await crawl(request)
    except Exception as exc:
        # Record the failure so status polls can surface the error.
        background_tasks[task_id]["status"] = "failed"
        background_tasks[task_id]["completed_at"] = time.time()
        background_tasks[task_id]["error"] = str(exc)
    else:
        # Store the serialized result alongside the completion timestamp.
        background_tasks[task_id]["status"] = "completed"
        background_tasks[task_id]["completed_at"] = time.time()
        background_tasks[task_id]["result"] = crawl_response.dict()

async def get_crawl_status(task_id: str) -> Dict[str, Any]:
    """Look up the current status of a background crawl task.

    Args:
        task_id: Identifier returned when the task was created.

    Returns:
        Dictionary with the task's status and creation time, plus its
        completion time and result (if completed) or error (if failed).

    Raises:
        HTTPException: 404 if no task with the given ID exists.
    """
    # EAFP: a missing entry becomes a 404.
    try:
        task_data = background_tasks[task_id]
    except KeyError:
        raise HTTPException(status_code=404, detail=f"Task {task_id} not found")

    status = task_data["status"]
    response = {
        "task_id": task_id,
        "status": status,
        "created_at": task_data["created_at"],
    }

    # Terminal tasks additionally expose their completion time and outcome.
    if status in ("completed", "failed"):
        response["completed_at"] = task_data.get("completed_at")
        outcome_key = "result" if status == "completed" else "error"
        response[outcome_key] = task_data.get(outcome_key)

    return response