#!/usr/bin/env python3
"""
Paper downloading utilities.
"""

import os
import re
from pathlib import Path
from bs4 import BeautifulSoup

from .network import create_session_with_retries
from .web_utils import extract_actual_url_from_wayback, convert_to_direct_download_url
from .file_utils import is_valid_pdf_file

# Hosts that serve landing pages or block automated clients; URLs containing
# any of these substrings are never worth attempting.
_BLOCKED_HOSTS = ('papers.ssrn.com', 'books.google.', 'scholar.google.')


def _is_blocked_url(url):
    """Return True if *url* points at a host we never try to download from."""
    return any(host in url for host in _BLOCKED_HOSTS)


def _is_arxiv_url(url):
    """Return True if *url* is a non-empty arXiv abstract or arXiv DOI link."""
    return bool(url) and ('arxiv.org/abs/' in url or 'doi.org/10.48550/arXiv.' in url)


def _paper_filename(title, download_dir):
    """Build a filesystem-safe PDF path for *title* inside *download_dir*."""
    sanitized_title = re.sub(r'[\\/*?:"<>|]', "_", title)
    # Truncate to 100 chars to stay well under filesystem name limits.
    return os.path.join(download_dir, f"{sanitized_title[:100]}.pdf")


def try_download_paper(paper, download_dir='papers', verbose=False):
    """Try to download a paper and return its status.

    Args:
        paper: Mapping with at least 'title', 'download_url' and
            'website_url' keys (the URL values may be None or empty).
        download_dir: Directory the PDF is saved into (created if missing).
        verbose: If True, print progress/diagnostic messages.

    Returns:
        One of "downloaded", "inaccessible", "download failed", or "undo"
        (the initial status, if no download was ever attempted).
    """
    # Create download directory if it doesn't exist
    Path(download_dir).mkdir(exist_ok=True)

    status = "undo"

    # Skip SSRN URLs and Google-related URLs up front — they are either
    # paywalled or bot-blocked, so attempting them just wastes time.
    for key in ('download_url', 'website_url'):
        if paper[key] and _is_blocked_url(paper[key]):
            if verbose:
                print(f"Skipping URL as it may be inaccessible or not useful: {paper[key]}")
            return "inaccessible"

    # Function to handle direct download attempts
    def attempt_download(url, filename):
        """Fetch *url* into *filename*; return True on apparent success.

        Cleans up empty or partial files on failure so callers never see a
        bogus PDF on disk.
        """
        try:
            # Skip SSRN and Google URLs (individual URLs from a list may
            # still be blocked even when the top-level fields were not)
            if _is_blocked_url(url):
                if verbose:
                    print(f"Skipping URL as it may be inaccessible or not useful: {url}")
                return False

            # First, extract actual URL if it's an archive.org URL
            clean_url = extract_actual_url_from_wayback(url, verbose)

            # Convert to direct download URL if possible (e.g., arXiv)
            clean_url = convert_to_direct_download_url(clean_url, verbose)

            if verbose:
                if clean_url != url:
                    print(f"Using direct download URL: {clean_url}")
                print(f"Attempting direct download from: {clean_url}")

            session = create_session_with_retries()
            # NOTE(review): verify=False disables TLS certificate checks —
            # presumably to tolerate mirrors with broken certs; confirm this
            # is intentional.
            response = session.get(clean_url, stream=True, timeout=5, verify=False)
            response.raise_for_status()

            # Warn about likely non-PDF payloads (e.g. HTML error pages)
            content_type = response.headers.get('Content-Type', '').lower()
            if 'application/pdf' not in content_type and 'application/octet-stream' not in content_type:
                if verbose:
                    print(f"Warning: Content-Type is not PDF: {content_type}")

            with open(filename, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)

            # Just check if file exists and is not empty
            if os.path.exists(filename) and os.path.getsize(filename) > 0:
                return True

            if verbose:
                print(f"Downloaded file is empty or missing: {filename}")
            # Remove the invalid (empty) file
            if os.path.exists(filename):
                os.remove(filename)
                if verbose:
                    print(f"Removed invalid file: {filename}")
            return False

        except Exception as e:
            if verbose:
                print(f"Download attempt failed for {url}: {e}")
            # Clean up any partially downloaded file
            if os.path.exists(filename):
                os.remove(filename)
                if verbose:
                    print(f"Removed partial download: {filename}")
            return False

    # Special handling for arXiv URLs - prioritize direct PDF download
    if _is_arxiv_url(paper['website_url']) or _is_arxiv_url(paper['download_url']):
        if verbose:
            print("ArXiv URL detected, attempting direct PDF download")

        filename = _paper_filename(paper['title'], download_dir)

        # Prefer download_url when it is itself an arXiv URL, else fall back
        # to website_url.  The truthiness check inside _is_arxiv_url fixes a
        # crash ('in' on None) when download_url was None but website_url
        # was an arXiv link.
        if _is_arxiv_url(paper['download_url']):
            url_to_use = paper['download_url']
        else:
            url_to_use = paper['website_url']

        direct_url = convert_to_direct_download_url(url_to_use, verbose)
        if attempt_download(direct_url, filename):
            if verbose:
                print(f"Successfully downloaded arXiv paper to: {filename}")
            return "downloaded"

    filename = _paper_filename(paper['title'], download_dir)

    # First try the download_url if available
    if paper['download_url']:
        # Handle multiple comma-separated URLs
        if ',' in paper['download_url']:
            urls = paper['download_url'].split(',')

            # First, try URLs that appear to be PDFs
            pdf_urls = [u.strip() for u in urls
                        if u.strip().lower().endswith('.pdf') or '/pdf/' in u.strip().lower()]
            other_urls = [u.strip() for u in urls if u.strip() not in pdf_urls and u.strip()]

            # Filter out SSRN URLs
            pdf_urls = [u for u in pdf_urls if 'papers.ssrn.com' not in u]
            other_urls = [u for u in other_urls if 'papers.ssrn.com' not in u]

            if verbose and pdf_urls:
                print(f"Found {len(pdf_urls)} direct PDF URLs to try first")

            # Try PDF URLs first
            for pdf_url in pdf_urls:
                if verbose:
                    print(f"Trying direct PDF URL: {pdf_url}")
                if attempt_download(pdf_url, filename):
                    if verbose:
                        print(f"Successfully downloaded from PDF URL: {pdf_url} to: {filename}")
                    return "downloaded"

            # Then try other URLs
            for other_url in other_urls:
                if verbose:
                    print(f"Trying non-PDF URL: {other_url}")
                if attempt_download(other_url, filename):
                    if verbose:
                        print(f"Successfully downloaded from URL: {other_url} to: {filename}")
                    return "downloaded"

            # If all download_urls failed, try the website_url as a fallback
            if (paper['website_url'] and paper['website_url'] not in urls
                    and 'papers.ssrn.com' not in paper['website_url']):
                if verbose:
                    print(f"All download URLs failed, trying website URL: {paper['website_url']}")
                if attempt_download(paper['website_url'], filename):
                    if verbose:
                        print(f"Successfully downloaded from website URL: {paper['website_url']} to: {filename}")
                    return "downloaded"

            # If we're here, all URLs failed
            status = "download failed"
        else:
            # Single URL - check if it's a PDF
            is_likely_pdf = (paper['download_url'].lower().endswith('.pdf')
                             or '/pdf/' in paper['download_url'].lower())

            if is_likely_pdf and verbose:
                print(f"URL appears to be a direct PDF: {paper['download_url']}")

            if verbose:
                print(f"Attempting to download: {paper['download_url']}")

            # Directly attempt download
            if attempt_download(paper['download_url'], filename):
                if verbose:
                    print(f"Successfully downloaded to: {filename}")
                return "downloaded"

            # If download_url failed, try website_url as a fallback
            if (paper['website_url'] and paper['website_url'] != paper['download_url']
                    and 'papers.ssrn.com' not in paper['website_url']):
                if verbose:
                    print(f"Download URL failed, trying website URL: {paper['website_url']}")
                if attempt_download(paper['website_url'], filename):
                    if verbose:
                        print(f"Successfully downloaded from website URL: {paper['website_url']} to: {filename}")
                    return "downloaded"

            status = "download failed"

    # If no download_url, try website_url directly
    elif paper['website_url'] and 'papers.ssrn.com' not in paper['website_url']:
        if verbose:
            print(f"No download URL, trying website URL directly: {paper['website_url']}")

        if attempt_download(paper['website_url'], filename):
            if verbose:
                print(f"Successfully downloaded from website URL: {paper['website_url']} to: {filename}")
            return "downloaded"

        status = "download failed"
    else:
        if verbose:
            print(f"No download URL or website URL available for: {paper['title']}")
        status = "download failed"

    return status

def try_download_from_scihub(paper, download_dir='papers', verbose=False):
    """Try to download a paper from Sci-Hub using its DOI.

    Args:
        paper: Mapping with at least 'doi' and 'title' keys.
        download_dir: Directory the PDF is saved into (created if missing).
        verbose: If True, print progress/diagnostic messages.

    Returns:
        "downloaded" if a valid PDF was saved, otherwise "download failed".
    """
    if not paper['doi']:
        if verbose:
            print(f"No DOI available for Sci-Hub download: {paper['title']}")
        return "download failed"

    try:
        # Create download directory if it doesn't exist
        Path(download_dir).mkdir(exist_ok=True)

        # Multiple Sci-Hub domains to try, in order
        scihub_domains = [
            "https://sci-hub.se/",
            "https://sci-hub.st/",
            "https://sci-hub.ru/",
            "https://sci-hub.ren/"
        ]

        # Browser-like User-Agent — Sci-Hub tends to reject obvious bots
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        # Target path is identical for every domain attempt, so build it once
        sanitized_title = re.sub(r'[\\/*?:"<>|]', "_", paper['title'])
        filename = os.path.join(download_dir, f"{sanitized_title[:100]}.pdf")

        def fetch_pdf(session, pdf_url, link_desc, source_desc):
            """Download *pdf_url* to *filename* and validate it as a PDF.

            *link_desc*/*source_desc* only vary the verbose messages so both
            page layouts (iframe vs. download button) report as before.
            Returns True on success; removes the bad file and returns False
            otherwise, letting the caller move on to the next domain.
            """
            # Sci-Hub often emits protocol-relative links
            if pdf_url.startswith('//'):
                pdf_url = 'https:' + pdf_url

            if verbose:
                print(f"Found {link_desc} on Sci-Hub: {pdf_url}")

            # Download the PDF
            pdf_response = session.get(pdf_url, headers=headers, stream=True, timeout=5, verify=False)
            pdf_response.raise_for_status()

            with open(filename, 'wb') as f:
                for chunk in pdf_response.iter_content(chunk_size=8192):
                    f.write(chunk)

            # Verify the downloaded file is actually a PDF (Sci-Hub can
            # return a captcha/error page with HTTP 200)
            if is_valid_pdf_file(filename, verbose):
                if verbose:
                    print(f"Successfully downloaded from {source_desc} to: {filename}")
                return True

            if verbose:
                print(f"Downloaded file from {source_desc} is not a valid PDF: {filename}")
            # Remove the invalid file
            os.remove(filename)
            if verbose:
                print(f"Removed invalid file: {filename}")
            return False

        # Try each domain with a short timeout
        for domain in scihub_domains:
            scihub_url = f"{domain}{paper['doi']}"

            if verbose:
                print(f"Trying Sci-Hub domain: {scihub_url}")

            try:
                session = create_session_with_retries()
                response = session.get(scihub_url, headers=headers, timeout=5, verify=False)
                response.raise_for_status()

                # Parse the HTML response
                soup = BeautifulSoup(response.text, 'html.parser')

                # Primary layout: the PDF is embedded via <iframe id="pdf">
                iframe = soup.find('iframe', id='pdf')
                if iframe and iframe.get('src'):
                    if fetch_pdf(session, iframe['src'], "PDF link", "Sci-Hub"):
                        return "downloaded"
                    continue  # Try next domain

                # Fallback layout: a download-button anchor labelled "⇣"
                download_button = soup.find('a', string='⇣')
                if download_button and download_button.get('href'):
                    if fetch_pdf(session, download_button['href'],
                                 "alternative PDF link", "alternative Sci-Hub link"):
                        return "downloaded"
                    continue  # Try next domain

            except Exception as e:
                if verbose:
                    print(f"Failed with domain {domain}: {e}")

        if verbose:
            print(f"Could not find PDF on any Sci-Hub domain for DOI: {paper['doi']}")
        return "download failed"

    except Exception as e:
        if verbose:
            print(f"Failed to download from Sci-Hub for DOI {paper['doi']}: {e}")
        return "download failed"

def try_download_acm_papers(status_file='data/paper_status.csv', info_file='data/paper_info.csv', download_dir='papers', verbose=False):
    """Try to download papers from ACM Digital Library that failed to download."""
    # Placeholder: the ACM download logic has not been written yet, so this
    # only reports the configuration it would have used.
    notices = (
        "ACM Digital Library paper download not yet implemented",
        f"Would attempt to download papers to: {download_dir}",
        f"Using status file: {status_file}",
        f"Using info file: {info_file}",
    )
    for notice in notices:
        print(notice)
    return