#!/usr/bin/env python3
"""
DOI extraction utilities for scholarly papers.
"""

import re
import io
import requests
from bs4 import BeautifulSoup

from .network import create_session_with_retries
from .web_utils import check_pdf_url, extract_actual_url_from_wayback

# Check if PDF parsing libraries are available.
# PDF_PARSING_AVAILABLE gates extract_doi_from_pdf_url(): when either optional
# dependency is missing, PDF-based DOI extraction is skipped entirely.
try:
    import PyPDF2
    from pdfminer.high_level import extract_text as pdf_extract_text
    PDF_PARSING_AVAILABLE = True
except ImportError:
    PDF_PARSING_AVAILABLE = False
    # Best-effort warning at import time; the rest of the module still works.
    print("Warning: PDF parsing libraries not available. Install PyPDF2 and pdfminer.six for enhanced DOI extraction.")

def extract_doi_from_url(url):
    """Attempt to extract a DOI from a URL.

    For doi.org URLs the path itself is checked first; otherwise the full
    URL string is scanned for common DOI patterns.

    Args:
        url: URL string that may contain a DOI (may be None/empty).

    Returns:
        The bare DOI string (starting with "10.") or None if not found.
    """
    if not url:
        return None

    # Common DOI patterns - expanded to catch more variants.
    # re.IGNORECASE below lets the A-Z ranges match lowercase as well.
    doi_patterns = [
        r'10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'doi\.org/10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'doi:\s*10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'doi\s*=\s*10\.\d{4,9}/[-._;()/:A-Z0-9]+'
    ]

    def _clean(doi):
        """Strip a leading 'doi.org/' or 'doi:' prefix from a matched DOI."""
        if 'doi.org/' in doi:
            doi = doi.split('doi.org/')[-1]
        elif 'doi:' in doi:
            doi = doi.split('doi:')[-1].strip()
        return doi.strip()

    # For doi.org URLs the path is the DOI - parse and check it directly.
    if 'doi.org' in url:
        # FIX: the original used a fragile "'urlparse' in globals()" check
        # with a __import__('urllib.parse') fallback; a plain import is
        # equivalent and reliable.
        from urllib.parse import urlparse
        path = urlparse(url).path
        if path.startswith('/'):
            path = path[1:]

        for pattern in doi_patterns:
            match = re.search(pattern, path, re.IGNORECASE)
            if match:
                return _clean(match.group(0))

    # Also try to find a DOI anywhere in the full URL.
    for pattern in doi_patterns:
        match = re.search(pattern, url, re.IGNORECASE)
        if match:
            return _clean(match.group(0))

    return None

def extract_doi_from_text(text):
    """Extract a DOI from free-text content.

    Args:
        text: Arbitrary text that may mention a DOI (may be None/empty).

    Returns:
        The bare DOI string (starting with "10.") or None if not found.
    """
    if not text:
        return None

    # Extended DOI patterns (all matched case-insensitively below).
    doi_patterns = [
        r'10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'doi\.org/10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'doi:\s*10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'doi\s*=\s*10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'DOI:\s*10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        # FIX: the '.' in 'identifier.doi' was unescaped and matched any char.
        r'identifier\.doi\s*=\s*10\.\d{4,9}/[-._;()/:A-Z0-9]+'
    ]

    for pattern in doi_patterns:
        match = re.search(pattern, text, re.IGNORECASE)
        if match:
            doi = match.group(0)
            # Strip whichever prefix the pattern matched (identifier.doi=,
            # doi.org/, doi:, doi=) so only the bare identifier remains.
            doi = re.sub(
                r'^(?:identifier\.doi\s*=\s*|doi\.org/|doi:\s*|doi\s*=\s*)',
                '', doi, flags=re.IGNORECASE)

            # Only accept well-formed DOIs.
            if doi.startswith('10.'):
                return doi.strip()

    return None

def extract_doi_from_pdf_url(pdf_url, verbose=False):
    """Download a PDF and try to extract a DOI from it.

    Tries, in order: a DOI embedded in the URL itself, text extracted with
    PyPDF2 (first few pages only), then text extracted with pdfminer.

    Args:
        pdf_url: URL (or comma-separated list of URLs) of candidate PDF(s).
        verbose: If True, print progress/diagnostic messages.

    Returns:
        The DOI string, or None if none could be extracted.
    """
    if not pdf_url or not PDF_PARSING_AVAILABLE:
        return None

    # Handle multiple comma-separated URLs by recursing on each candidate.
    if ',' in pdf_url:
        for single_url in pdf_url.split(','):
            # Skip SSRN URLs
            if 'papers.ssrn.com' not in single_url:
                doi = extract_doi_from_pdf_url(single_url.strip(), verbose)
                if doi:
                    return doi
        return None

    # Skip SSRN URLs
    if 'papers.ssrn.com' in pdf_url:
        if verbose:
            print(f"Skipping SSRN URL as it may be inaccessible: {pdf_url}")
        return None

    # Extract actual URL if it's an archive.org URL
    pdf_url = extract_actual_url_from_wayback(pdf_url, verbose)

    if verbose:
        print(f"Attempting to extract DOI from PDF: {pdf_url}")

    # Cheap path first: the URL itself may already contain the DOI.
    doi_from_url = extract_doi_from_url(pdf_url)
    if doi_from_url:
        if verbose:
            print(f"Found DOI in PDF URL: {doi_from_url}")
        return doi_from_url

    try:
        session = create_session_with_retries()
        # stream=True defers the body download until .content is read, so a
        # non-PDF response can be rejected from its headers alone.
        # FIX: use a context manager so the connection is released even on
        # the early non-PDF return (stream=True otherwise leaks it).
        # NOTE(security): verify=False disables TLS certificate checks.
        with session.get(pdf_url, stream=True, timeout=30, verify=False) as response:
            response.raise_for_status()

            # Reject responses that are clearly not PDFs.
            content_type = response.headers.get('Content-Type', '').lower()
            if 'application/pdf' not in content_type and not pdf_url.lower().endswith('.pdf'):
                if verbose:
                    print(f"URL does not point to a PDF: {pdf_url} (Content-Type: {content_type})")
                return None

            # Load the PDF content into memory.
            pdf_content = io.BytesIO(response.content)

        # Try using PyPDF2 first
        try:
            pdf_reader = PyPDF2.PdfReader(pdf_content)
            text = ""
            # Read first few pages (DOIs usually appear early)
            for i in range(min(3, len(pdf_reader.pages))):
                text += pdf_reader.pages[i].extract_text() + " "

            if verbose:
                print(f"Extracted text from PDF using PyPDF2 (first 200 chars): {text[:200]}")

            doi = extract_doi_from_text(text)
            if doi:
                if verbose:
                    print(f"Found DOI in PDF using PyPDF2: {doi}")
                return doi
        except Exception as e:
            if verbose:
                print(f"PyPDF2 extraction failed: {e}")

        # If PyPDF2 fails or doesn't find a DOI, try pdfminer
        try:
            # Reset the BytesIO position
            pdf_content.seek(0)
            text = pdf_extract_text(pdf_content)

            if verbose:
                print(f"Extracted text from PDF using pdfminer (first 200 chars): {text[:200]}")

            doi = extract_doi_from_text(text)
            if doi:
                if verbose:
                    print(f"Found DOI in PDF using pdfminer: {doi}")
                return doi
        except Exception as e:
            if verbose:
                print(f"pdfminer extraction failed: {e}")

    except Exception as e:
        if verbose:
            print(f"Error extracting DOI from PDF {pdf_url}: {e}")

    return None

def extract_doi_from_website(url, verbose=False):
    """Extract a DOI from a website by visiting the URL.

    Tries several strategies in order: DOI embedded in the URL path, DOI in
    the final (post-redirect) URL, citation meta tags, explicit "doi:" text,
    links to doi.org, data-doi attributes, DOI/identifier containers, and
    finally a generic scan of the whole page text.

    Args:
        url: Web page URL to inspect (may be None/empty).
        verbose: If True, print progress/diagnostic messages.

    Returns:
        The DOI string, or None if none could be extracted.
    """
    if not url:
        return None

    # Skip SSRN URLs and Google Books
    if 'papers.ssrn.com' in url or 'books.google.com' in url:
        if verbose:
            print(f"Skipping URL as it may be inaccessible or not useful: {url}")
        return None

    # Extract actual URL if it's an archive.org URL
    url = extract_actual_url_from_wayback(url, verbose)

    if verbose:
        print(f"Attempting to extract DOI from website: {url}")

    # Extract DOI from URL if it contains one (e.g., ACM Digital Library URLs often contain DOIs)
    if '/doi/' in url:
        doi_from_url = extract_doi_from_url(url)
        if doi_from_url:
            if verbose:
                print(f"Found DOI in URL path: {doi_from_url}")
            return doi_from_url

    try:
        # Browser-like headers reduce the chance of being blocked.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'DNT': '1',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cache-Control': 'max-age=0'
        }

        session = create_session_with_retries()
        # NOTE(security): verify=False disables TLS certificate verification.
        response = session.get(url, headers=headers, timeout=30, verify=False)
        response.raise_for_status()

        # Get the final URL after redirects
        final_url = response.url

        if verbose:
            print(f"Final URL after redirects: {final_url}")

        # Check if the final URL contains a DOI
        doi_from_url = extract_doi_from_url(final_url)
        if doi_from_url:
            if verbose:
                print(f"Found DOI in final URL: {doi_from_url}")
            return doi_from_url

        # Parse the HTML content
        soup = BeautifulSoup(response.text, 'html.parser')

        # Method 1: Look for meta tags with DOI information.
        # FIX: removed the duplicated 'dc.identifier' entry and renamed the
        # local that shadowed the builtin `property`.
        doi_meta_names = ['citation_doi', 'dc.identifier', 'dc.identifier.doi', 'doi']
        for tag in soup.find_all('meta'):
            name = tag.get('name', '').lower()
            prop = tag.get('property', '').lower()

            if name in doi_meta_names or prop in ['og:doi']:
                content = tag.get('content')
                if content and re.search(r'10\.\d{4,9}/.+', content):
                    if verbose:
                        print(f"Found DOI in meta tag: {content}")
                    return content.strip()

        # Method 2: Look for DOI in text
        page_text = soup.get_text()
        doi_pattern = r'(?:doi|DOI):\s*(10\.\d{4,9}/[-._;()/:A-Z0-9]+)'
        match = re.search(doi_pattern, page_text)
        if match:
            if verbose:
                print(f"Found DOI in text: {match.group(1)}")
            return match.group(1).strip()

        # Method 3: Look for links to doi.org
        for link in soup.find_all('a', href=True):
            href = link['href']
            if 'doi.org' in href:
                doi = href.split('doi.org/')[-1]
                if re.match(r'10\.\d{4,9}/.+', doi):
                    if verbose:
                        print(f"Found DOI in link to doi.org: {doi}")
                    return doi.strip()

        # Method 4: Look for data-doi attributes
        for element in soup.find_all(attrs={"data-doi": True}):
            doi = element.get('data-doi')
            if doi and re.match(r'10\.\d{4,9}/.+', doi):
                if verbose:
                    print(f"Found DOI in data-doi attribute: {doi}")
                return doi.strip()

        # Method 5: Look for specific elements that often contain DOIs
        doi_containers = soup.find_all(['div', 'span', 'p'], class_=lambda c: c and ('doi' in c.lower() or 'identifier' in c.lower()))
        for container in doi_containers:
            container_text = container.get_text()
            doi = extract_doi_from_text(container_text)
            if doi:
                if verbose:
                    print(f"Found DOI in specific element: {doi}")
                return doi

        # Method 6: Generic DOI pattern in the full page text.
        # FIX: the original reused `text`, which the Method 5 loop had
        # overwritten with the last container's text, so the generic scan
        # never ran on the whole page when any DOI-like container existed.
        generic_doi = extract_doi_from_text(page_text)
        if generic_doi:
            if verbose:
                print(f"Found DOI using generic pattern: {generic_doi}")
            return generic_doi.strip()

    except Exception as e:
        if verbose:
            print(f"Error extracting DOI from website {url}: {e}")

    return None

def try_multiple_doi_extraction_methods(paper, verbose=False):
    """Try multiple methods to extract a DOI from a paper record.

    Methods, in order: an already-populated 'doi' field, a DOI embedded in
    the website URL, a DOI mentioned in the abstract, a DOI inside the
    downloadable PDF, and finally scraping the website content.

    Args:
        paper: Dict describing the paper; optional keys used here are
            'doi', 'title', 'website_url', 'abstract' and 'download_url'.
        verbose: If True, print progress/diagnostic messages.

    Returns:
        The DOI string, or None if every method fails.
    """
    if verbose:
        print(f"\nAttempting to extract DOI for paper: {paper.get('title')}")

    # FIX: use .get() for every optional key so records that omit
    # 'website_url'/'abstract'/'download_url' no longer raise KeyError
    # (the original mixed .get('doi') with direct indexing).

    # Method 1: Check if DOI is already in the paper data
    doi = paper.get('doi')
    if doi:
        if verbose:
            print(f"DOI already available: {doi}")
        return doi

    # Method 2: Try to extract from website URL - now with archive.org handling
    website_url = paper.get('website_url')
    if website_url and '/doi/' in website_url:
        # Check if the URL is directly a DOI URL
        doi = extract_doi_from_url(website_url)
        if doi:
            if verbose:
                print(f"Extracted DOI from website URL: {doi}")
            return doi

    # Method 3: Try to extract from abstract
    abstract = paper.get('abstract')
    if abstract:
        doi = extract_doi_from_text(abstract)
        if doi:
            if verbose:
                print(f"Extracted DOI from abstract: {doi}")
            return doi

    # Method 4: If there's a PDF URL, try to extract from it
    download_url = paper.get('download_url')
    if download_url and check_pdf_url(download_url, verbose):
        doi = extract_doi_from_pdf_url(download_url, verbose)
        if doi:
            if verbose:
                print(f"Extracted DOI from PDF: {doi}")
            return doi

    # Method 5: Try to extract from website content
    if website_url:
        if verbose:
            print(f"Attempting to extract DOI from website content: {website_url}")
        doi = extract_doi_from_website(website_url, verbose)
        if doi:
            if verbose:
                print(f"Extracted DOI from website content: {doi}")
            return doi

    if verbose:
        print("Failed to extract DOI using all methods")

    return None