#!/usr/bin/env python3
"""
Web utilities for URL handling and processing.
"""

import re
import requests
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from .network import create_session_with_retries

def extract_actual_url_from_wayback(url, verbose=False):
    """Extract the original (captured) URL from a wayback/archive.org URL.

    Args:
        url: A URL string. May be a comma-separated list of URLs, and may
            be an archive.org / web.archive.org / scholar.archive.org
            capture wrapping the real paper URL.
        verbose: When True, print progress information.

    Returns:
        The underlying captured URL when one can be extracted; for
        comma-separated input, the comma-joined processed URLs; otherwise
        the input unchanged (including None/empty input).
    """
    if not url:
        return url

    # A comma-separated list (that is not one quoted value) is processed
    # element-by-element and re-joined.
    if ',' in url and not (url.startswith('"') and url.endswith('"')):
        processed_urls = []
        for single_url in url.split(','):
            processed_url = extract_actual_url_from_wayback(single_url.strip(), verbose)
            if processed_url:
                processed_urls.append(processed_url)

        if processed_urls:
            if verbose and len(processed_urls) > 1:
                print(f"Processed multiple URLs: {processed_urls}")
            return ','.join(processed_urls)

    # Patterns for archive.org wayback URLs.  The web.archive.org
    # timestamp may carry a two-letter modifier flag (e.g. "if_", "id_",
    # "im_") appended to the digits; the previous pattern (\d+/ only)
    # failed to extract such captures.
    wayback_patterns = [
        # Captured URL follows the wayback marker with no other separator
        r'https?://scholar\.archive\.org/work/[^/]+/access/wayback/(https?://.+)$',
        r'https?://web\.archive\.org/web/\d+(?:[a-z]{2}_)?/(https?://.+)$',
        r'https?://archive\.org/[^/]+/[^/]+/(https?://.+)$'
    ]

    for pattern in wayback_patterns:
        match = re.search(pattern, url)
        if match:
            actual_url = match.group(1)
            if verbose:
                print(f"Extracted actual URL from archive.org: {actual_url}")
            return actual_url

    # Fallback for scholar.archive.org URLs the regexes did not catch:
    # split on the wayback marker and keep the trailing http(s) part.
    if 'scholar.archive.org/work/' in url and '/access/wayback/' in url:
        try:
            parts = url.split('/access/wayback/')
            if len(parts) == 2 and parts[1].startswith('http'):
                actual_url = parts[1]
                if verbose:
                    print(f"Extracted actual URL using special handling: {actual_url}")
                return actual_url
        except Exception as e:
            if verbose:
                print(f"Error in special handling for archive.org URL: {e}")

    return url

def convert_to_direct_download_url(url, verbose=False):
    """Convert URLs to direct download links where possible.

    Currently handles arXiv abstract pages and arXiv DOIs; anything else
    is returned unchanged.

    Args:
        url: The URL to convert (may be None/empty, returned as-is).
        verbose: When True, print each conversion performed.

    Returns:
        A direct arXiv PDF URL when the input is recognized, otherwise
        the input unchanged.
    """
    if not url:
        return url

    # Handle arXiv URLs - convert from abstract page to direct PDF.
    # e.g. https://arxiv.org/abs/2401.00434 -> https://arxiv.org/pdf/2401.00434.pdf
    arxiv_pattern = r'https?://arxiv\.org/abs/([0-9]+\.[0-9]+(?:v[0-9]+)?)'
    match = re.search(arxiv_pattern, url)
    if match:
        paper_id = match.group(1)
        pdf_url = f"https://arxiv.org/pdf/{paper_id}.pdf"
        if verbose:
            print(f"Converted arXiv URL to direct PDF: {pdf_url}")
        return pdf_url

    # Handle arXiv DOI URLs.  DOI names are case-insensitive, so
    # "arXiv." and "arxiv." variants must both match (IGNORECASE).
    # e.g. https://doi.org/10.48550/arXiv.2401.00434 -> https://arxiv.org/pdf/2401.00434.pdf
    arxiv_doi_pattern = r'https?://doi\.org/10\.48550/arXiv\.([0-9]+\.[0-9]+(?:v[0-9]+)?)'
    match = re.search(arxiv_doi_pattern, url, re.IGNORECASE)
    if match:
        paper_id = match.group(1)
        pdf_url = f"https://arxiv.org/pdf/{paper_id}.pdf"
        if verbose:
            print(f"Converted arXiv DOI URL to direct PDF: {pdf_url}")
        return pdf_url

    return url

def check_pdf_url(url, verbose=False, timeout=5):
    """Determine whether a URL appears to point at a PDF document.

    Heuristics are applied in order: comma-separated lists succeed if any
    member does; SSRN links are skipped outright; archive.org wrappers are
    unwrapped first; then the path is inspected and, failing that, a HEAD
    request checks the Content-Type.

    Args:
        url: The URL (or comma-separated URLs) to inspect.
        verbose: When True, print each decision step.
        timeout: Seconds allowed for the HEAD request.

    Returns:
        True when the URL looks like (or is served as) a PDF, else False.
    """
    if not url:
        return False

    # A comma-separated list counts as a PDF if any member does.
    if ',' in url:
        return any(
            check_pdf_url(part.strip(), verbose, timeout)
            for part in url.split(',')
        )

    # SSRN is skipped: it may be inaccessible in some regions.
    if 'papers.ssrn.com' in url:
        if verbose:
            print(f"Skipping SSRN URL as it may be inaccessible: {url}")
        return False

    # Unwrap archive.org capture URLs before inspecting the path.
    url = extract_actual_url_from_wayback(url, verbose)

    if verbose:
        print(f"Checking if URL is PDF: {url}")

    lowered = url.lower()

    # Path-based heuristics: explicit .pdf extension ...
    if lowered.endswith('.pdf'):
        if verbose:
            print(f"URL ends with .pdf: {url}")
        return True

    # ... or a publisher-style PDF path segment.
    if '/doi/pdf/' in lowered or '/pdf/' in lowered:
        if verbose:
            print(f"URL contains PDF path: {url}")
        return True

    try:
        # Fall back to a HEAD request and inspect the Content-Type.
        # NOTE(review): verify=False disables TLS certificate checking;
        # presumably deliberate for flaky academic mirrors - confirm.
        session = create_session_with_retries()
        head_response = session.head(url, timeout=timeout, allow_redirects=True, verify=False)
        mime = head_response.headers.get('Content-Type', '').lower()

        if verbose:
            print(f"Content-Type for {url}: {mime}")

        looks_like_pdf = 'application/pdf' in mime
        if looks_like_pdf and verbose:
            print(f"URL confirmed as PDF: {url}")
        return looks_like_pdf
    except requests.exceptions.Timeout:
        if verbose:
            print(f"Timeout checking PDF URL {url} - will try direct download instead")
        return False
    except Exception as e:
        if verbose:
            print(f"Error checking PDF URL {url}: {e}")
        return False

def _title_similarity(title1, title2):
    """Return word-overlap similarity between two titles, in [0, 1]."""
    if not title1 or not title2:
        return 0

    words1 = set(title1.lower().split())
    words2 = set(title2.lower().split())
    if not words1 or not words2:
        return 0

    return len(words1 & words2) / max(len(words1), len(words2))

def _extract_arxiv_id(result, title_element, verbose=False):
    """Extract the arXiv ID from one search-result element, or None.

    Tries three ways to locate the abstract link, then parses the ID out
    of its href (regex first, plain split as a fallback).
    """
    # Method 1: the link explicitly titled "Abstract".
    link_element = result.find('a', {'href': True, 'title': 'Abstract'})
    if not link_element:
        # Method 2: any link whose href contains '/abs/'.
        links = result.find_all('a', href=lambda href: href and '/abs/' in href)
        if links:
            link_element = links[0]
        else:
            # Method 3: the first link inside the title element.
            title_links = title_element.find_all('a', href=True)
            if title_links:
                link_element = title_links[0]

    if not (link_element and link_element.get('href')):
        return None

    href = link_element['href']
    if verbose:
        print(f"Found link with href: {href}")

    # Primary extraction: new-style IDs, optionally with a version suffix.
    match = re.search(r'/abs/([0-9]{4}\.[0-9]{4,5}(?:v[0-9]+)?)', href)
    if match:
        arxiv_id = match.group(1)
        if verbose:
            print(f"Extracted arXiv ID: {arxiv_id}")
        return arxiv_id

    # Fallback: split on '/abs/' and strip any version suffix.
    parts = href.split('/abs/')
    if len(parts) > 1:
        possible_id = parts[1].split('v')[0]
        if re.match(r'[0-9]{4}\.[0-9]{4,5}', possible_id):
            if verbose:
                print(f"Extracted arXiv ID using alternative method: {possible_id}")
            return possible_id

    return None

def search_arxiv_by_title(title, verbose=False):
    """Search for a paper on arxiv.org by its title and return the PDF URL if found.

    Args:
        title: The paper title to search for.
        verbose: When True, print search progress and match scores.

    Returns:
        The direct arXiv PDF URL of the best match (word-overlap
        similarity > 0.6), or None when no usable match is found or the
        search fails.
    """
    if not title:
        return None

    if verbose:
        print(f"Searching arXiv for: {title}")

    # Clean up the title for the search query.
    cleaned_title = title.replace(':', ' ').replace('-', ' ').replace('/', ' ')
    query = '+'.join(cleaned_title.split())
    search_url = f"https://arxiv.org/search/?query={query}&searchtype=title"

    try:
        if verbose:
            print(f"Searching with URL: {search_url}")

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        session = create_session_with_retries()
        response = session.get(search_url, headers=headers, timeout=30)
        response.raise_for_status()

        # Parse the HTML response.
        soup = BeautifulSoup(response.text, 'html.parser')
        results = soup.find_all('li', class_='arxiv-result')

        if not results:
            if verbose:
                print("No results found on arXiv")
            return None

        if verbose:
            print(f"Found {len(results)} potential matches on arXiv")

        best_match = None
        best_similarity = 0

        for result in results:
            title_element = result.find('p', class_='title')
            if not title_element:
                continue

            result_title = title_element.text.strip()
            similarity = _title_similarity(title, result_title)

            if verbose:
                print(f"Comparing: '{title}' with '{result_title}' - Similarity: {similarity:.2f}")

            # 0.6 is the threshold for a good match.
            if similarity <= best_similarity or similarity <= 0.6:
                continue

            arxiv_id = _extract_arxiv_id(result, title_element, verbose)
            if arxiv_id:
                # BUGFIX: only advance best_similarity once an ID was
                # actually extracted.  Previously it was raised before
                # extraction, so a high-similarity result with no usable
                # link permanently blocked weaker extractable matches.
                best_similarity = similarity
                best_match = {
                    'title': result_title,
                    'id': arxiv_id,
                    'similarity': similarity,
                    'pdf_url': f"https://arxiv.org/pdf/{arxiv_id}.pdf"
                }
            elif verbose:
                print(f"Could not find abstract link for paper: {result_title}")

        if best_match:
            if verbose:
                print(f"Best match: {best_match['title']} (Similarity: {best_match['similarity']:.2f})")
                print(f"arXiv ID: {best_match['id']}")
                print(f"PDF URL: {best_match['pdf_url']}")
            return best_match['pdf_url']
        else:
            if verbose:
                print("Could not extract arXiv ID for any matching papers")
            return None

    except Exception as e:
        if verbose:
            print(f"Error searching arXiv: {e}")
        return None