#!/usr/bin/env python3
"""
Script to search for papers in Google Scholar using SERP API, and attempt to download them.
"""

import os
import sys
import argparse
import requests
import json
import time
import csv
import re
import io
import urllib3
from pathlib import Path
from urllib.parse import urlparse, parse_qs, urljoin
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import shutil
import uuid
from contextlib import contextmanager
import signal

# Optional dependencies: PyPDF2 and pdfminer.six power DOI extraction from
# downloaded PDF content. When either import fails, PDF_PARSING_AVAILABLE is
# set to False and the PDF-based extraction paths are skipped at runtime.
try:
    import PyPDF2
    from pdfminer.high_level import extract_text as pdf_extract_text
    PDF_PARSING_AVAILABLE = True
except ImportError:
    PDF_PARSING_AVAILABLE = False
    print("Warning: PDF parsing libraries not available. Install PyPDF2 and pdfminer.six for enhanced DOI extraction.")

# Requests in this script are made with verify=False (many paper-hosting sites
# have broken or self-signed certificates), so suppress the resulting
# InsecureRequestWarning noise.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(description='Search for papers in Google Scholar')

    # Each spec is (flag strings, add_argument keyword options). Declaration
    # order is preserved because it controls the order in --help output.
    option_specs = [
        (('--keyword',),
         dict(action='append',
              help='Search keyword (can be used multiple times for OR search)')),
        (('-n', '--num'),
         dict(type=int, default=80,
              help='Number of results to retrieve (default: 80)')),
        (('--download',),
         dict(action='store_true',
              help='Download papers (default: False)')),
        (('--doi',),
         dict(action='store_true',
              help='Extract DOIs and write to a new CSV file (default: False)')),
        (('--filter',),
         dict(action='store_true',
              help='Filter redundant papers and save to paper_info_filter.csv (default: False)')),
        (('--validate-pdfs',),
         dict(action='store_true',
              help='Validate all downloaded PDFs and save results to a cache file (default: False)')),
        (('--retry',),
         dict(action='store_true',
              help='List papers that failed to download for retry (default: False)')),
        (('--arxiv',),
         dict(action='store_true',
              help='Search arxiv.org for papers that failed to download and attempt to download them (default: False)')),
        (('--pdf-fail',),
         dict(action='store_true',
              help='List papers that have PDF download links but failed to download (default: False)')),
        (('--acm',),
         dict(action='store_true',
              help='List papers from dl.acm.org that failed to download (default: False)')),
        (('--query',),
         dict(action='store_true',
              help='Query download status and print information about failed downloads classified by source website')),
        (('--precise',),
         dict(action='store_true',
              help='Match keywords exactly (use quotes for phrases)')),
        # '--import' is a Python keyword, so the parsed value lives on
        # args.import_pdf via an explicit dest.
        (('--import',),
         dict(metavar='FILE', dest='import_pdf',
              help='Import PDF file from local system, identify its title and update status')),
        (('--import-dir',),
         dict(metavar='DIR',
              help='Import all PDF files from a directory, identify their titles and update status')),
        (('--update',),
         dict(action='store_true',
              help='Scan papers directory and update status of previously failed downloads that now exist')),
        (('--project',),
         dict(metavar='PROJ_NAME',
              help='Create a project directory to store all files related to this search')),
        (('--check',),
         dict(action='store_true',
              help='Check for redundant papers in the data directory (default: False)')),
        (('-v', '--verbose'),
         dict(action='store_true',
              help='Enable verbose logging (default: False)')),
    ]
    for flags, options in option_specs:
        parser.add_argument(*flags, **options)

    return parser.parse_args()

def get_serp_api_key():
    """Get SERP API key from environment variables."""
    key = os.environ.get('SERP_API')
    if key:
        return key
    # Hard requirement: nothing else in the script works without the key.
    print("Error: SERP_API environment variable not set")
    sys.exit(1)

def setup_project_directories(project_name):
    """Create project directory structure and return paths dictionary."""
    # Without a project name, fall back to the flat default layout in cwd
    # and create nothing.
    if not project_name:
        return {
            'base_dir': '',
            'data_dir': 'data',
            'papers_dir': 'papers'
        }

    # All projects live under a common 'project/' parent directory.
    parent_dir = 'project'
    Path(parent_dir).mkdir(exist_ok=True)

    base_dir = os.path.join(parent_dir, project_name)
    paths = {
        'base_dir': base_dir,
        'data_dir': os.path.join(base_dir, 'data'),
        'papers_dir': os.path.join(base_dir, 'papers'),
    }

    # base_dir first, then its children.
    for key in ('base_dir', 'data_dir', 'papers_dir'):
        Path(paths[key]).mkdir(exist_ok=True)

    print(f"Created project directory structure in: {base_dir}")

    return paths

def create_session_with_retries():
    """Create a requests session with retry capabilities."""
    # Retry transient failures (rate limiting and 5xx) with exponential
    # backoff for the HTTP methods this script actually uses.
    retry_policy = Retry(
        total=5,
        backoff_factor=0.5,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["HEAD", "GET", "POST"],
    )
    session = requests.Session()
    for scheme in ('http://', 'https://'):
        session.mount(scheme, HTTPAdapter(max_retries=retry_policy))
    return session

def search_google_scholar(keyword, api_key, num_results=80):
    """Search Google Scholar using SERP API.

    Args:
        keyword: Query string sent as the `q` parameter.
        api_key: SERP API key.
        num_results: Maximum number of organic results to return.

    Returns:
        List of organic result dicts from SERP API, at most num_results long
        (may be shorter if fewer results exist or a request fails).
    """
    base_url = "https://serpapi.com/search"

    results = []
    start = 0

    # Google Scholar typically returns 10 results per page
    results_per_page = 10

    # Calculate how many pages to fetch (ceiling division)
    num_pages = (num_results + results_per_page - 1) // results_per_page

    # Fix: the original built a fresh Session (new retry adapters and
    # connection pool) on every page; create it once and reuse it.
    session = create_session_with_retries()

    for _page in range(num_pages):
        params = {
            "engine": "google_scholar",
            "q": keyword,
            "api_key": api_key,
            "start": start,
            "num": min(results_per_page, num_results - len(results))
        }

        try:
            response = session.get(base_url, params=params)
            response.raise_for_status()
            data = response.json()
        except requests.exceptions.RequestException as e:
            print(f"Error during API request: {e}")
            break

        if not data.get("organic_results"):
            # No more results
            break

        results.extend(data["organic_results"])

        # If we've reached our target number of results, stop
        if len(results) >= num_results:
            break

        # Increment the start parameter for pagination
        start += results_per_page

        # Be kind to the API by adding a small delay between requests
        time.sleep(1)

    return results[:num_results]  # Ensure we don't return more than requested

def _strip_doi_prefix(doi):
    """Normalize a regex-matched DOI by removing 'doi.org/' or 'doi:' prefixes."""
    if 'doi.org/' in doi:
        doi = doi.split('doi.org/')[-1]
    elif 'doi:' in doi:
        doi = doi.split('doi:')[-1].strip()
    return doi.strip()


def extract_doi_from_url(url):
    """Attempt to extract DOI from URL or text.

    Args:
        url: A URL (or arbitrary string) that may embed a DOI.

    Returns:
        The bare DOI string (e.g. '10.1145/1234.5678') or None.
    """
    if not url:
        return None

    # Common DOI patterns - expanded to catch more variants
    # (matched case-insensitively, so the A-Z class also covers a-z).
    doi_patterns = [
        r'10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'doi\.org/10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'doi:\s*10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'doi\s*=\s*10\.\d{4,9}/[-._;()/:A-Z0-9]+'
    ]

    # For doi.org links, prefer matching against the URL path only.
    if 'doi.org' in url:
        path = urlparse(url).path
        if path.startswith('/'):
            path = path[1:]

        for pattern in doi_patterns:
            match = re.search(pattern, path, re.IGNORECASE)
            if match:
                return _strip_doi_prefix(match.group(0))

    # Fall back to scanning the full URL.
    for pattern in doi_patterns:
        match = re.search(pattern, url, re.IGNORECASE)
        if match:
            return _strip_doi_prefix(match.group(0))

    return None

def extract_doi_from_text(text):
    """Extract DOI from text content."""
    if not text:
        return None

    # Extended DOI patterns, tried in order; searches are case-insensitive.
    candidate_patterns = (
        r'10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'doi\.org/10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'doi:\s*10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'doi\s*=\s*10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'DOI:\s*10\.\d{4,9}/[-._;()/:A-Z0-9]+',
        r'identifier.doi\s*=\s*10\.\d{4,9}/[-._;()/:A-Z0-9]+',
    )

    for pattern in candidate_patterns:
        hit = re.search(pattern, text, re.IGNORECASE)
        if hit is None:
            continue

        candidate = hit.group(0)
        # Strip whatever prefix the pattern happened to capture.
        if 'doi.org/' in candidate:
            candidate = candidate.split('doi.org/')[-1]
        elif re.search(r'doi:\s*', candidate, re.IGNORECASE):
            candidate = re.sub(r'doi:\s*', '', candidate, flags=re.IGNORECASE)
        elif re.search(r'doi\s*=\s*', candidate, re.IGNORECASE):
            candidate = re.sub(r'doi\s*=\s*', '', candidate, flags=re.IGNORECASE)
        elif re.search(r'DOI:\s*', candidate):
            candidate = re.sub(r'DOI:\s*', '', candidate)
        elif 'identifier.doi' in candidate.lower():
            candidate = re.sub(r'identifier\.doi\s*=\s*', '', candidate, flags=re.IGNORECASE)

        # Only accept strings that actually look like a DOI.
        if candidate.startswith('10.'):
            return candidate.strip()

    return None

def extract_actual_url_from_wayback(url, verbose=False):
    """Extract the actual paper URL from a wayback or archive.org URL."""
    if not url:
        return url

    # A comma-separated list of URLs is handled element by element
    # (unless the whole value is one quoted string).
    if ',' in url and not (url.startswith('"') and url.endswith('"')):
        cleaned = []
        for part in url.split(','):
            resolved = extract_actual_url_from_wayback(part.strip(), verbose)
            if resolved:
                cleaned.append(resolved)

        if cleaned:
            if verbose and len(cleaned) > 1:
                print(f"Processed multiple URLs: {cleaned}")
            return ','.join(cleaned)

    # Patterns for archive.org wayback URLs; group 1 captures the wrapped URL.
    for pattern in (
        r'https?://scholar\.archive\.org/work/[^/]+/access/wayback/(https?://.+)$',
        r'https?://web\.archive\.org/web/\d+/(https?://.+)$',
        r'https?://archive\.org/[^/]+/[^/]+/(https?://.+)$',
    ):
        hit = re.search(pattern, url)
        if hit:
            target = hit.group(1)
            if verbose:
                print(f"Extracted actual URL from archive.org: {target}")
            return target

    # Fallback for scholar.archive.org URLs the regexes above missed.
    if 'scholar.archive.org/work/' in url and '/access/wayback/' in url:
        try:
            pieces = url.split('/access/wayback/')
            if len(pieces) == 2 and pieces[1].startswith('http'):
                target = pieces[1]
                if verbose:
                    print(f"Extracted actual URL using special handling: {target}")
                return target
        except Exception as e:
            if verbose:
                print(f"Error in special handling for archive.org URL: {e}")

    return url

def convert_to_direct_download_url(url, verbose=False):
    """Convert URLs to direct download links where possible.

    Currently handles arXiv links only, e.g.:
        https://arxiv.org/abs/2401.00434        -> https://arxiv.org/pdf/2401.00434.pdf
        https://doi.org/10.48550/arXiv.2401.00434 -> https://arxiv.org/pdf/2401.00434.pdf

    Args:
        url: Candidate URL (may be None/empty).
        verbose: Print the conversion when one happens.

    Returns:
        A direct-download PDF URL when a conversion applies, otherwise
        the original url unchanged.
    """
    if not url:
        return url

    # The original had two near-identical copies of this logic; both patterns
    # capture the arXiv paper id (optionally versioned) in group 1.
    arxiv_patterns = [
        (r'https?://arxiv\.org/abs/([0-9]+\.[0-9]+(?:v[0-9]+)?)',
         'Converted arXiv URL to direct PDF'),
        (r'https?://doi\.org/10\.48550/arXiv\.([0-9]+\.[0-9]+(?:v[0-9]+)?)',
         'Converted arXiv DOI URL to direct PDF'),
    ]
    for pattern, label in arxiv_patterns:
        match = re.search(pattern, url)
        if match:
            pdf_url = f"https://arxiv.org/pdf/{match.group(1)}.pdf"
            if verbose:
                print(f"{label}: {pdf_url}")
            return pdf_url

    return url

def check_pdf_url(url, verbose=False, timeout=5):
    """Check if a URL points to a PDF file."""
    if not url:
        return False

    # A comma-separated value is treated as a list: PDF if any member is.
    if ',' in url:
        return any(
            check_pdf_url(part.strip(), verbose, timeout)
            for part in url.split(',')
        )

    # Skip SSRN URLs as they are inaccessible in some regions
    if 'papers.ssrn.com' in url:
        if verbose:
            print(f"Skipping SSRN URL as it may be inaccessible: {url}")
        return False

    # Resolve archive.org wrappers to the underlying URL first.
    url = extract_actual_url_from_wayback(url, verbose)

    if verbose:
        print(f"Checking if URL is PDF: {url}")

    # Cheap heuristics before any network round-trip.
    lowered = url.lower()
    if lowered.endswith('.pdf'):
        if verbose:
            print(f"URL ends with .pdf: {url}")
        return True

    if '/doi/pdf/' in lowered or '/pdf/' in lowered:
        if verbose:
            print(f"URL contains PDF path: {url}")
        return True

    try:
        # HEAD request with a short timeout: only the headers are needed.
        session = create_session_with_retries()
        response = session.head(url, timeout=timeout, allow_redirects=True, verify=False)
        content_type = response.headers.get('Content-Type', '').lower()

        if verbose:
            print(f"Content-Type for {url}: {content_type}")

        is_pdf = 'application/pdf' in content_type
        if is_pdf and verbose:
            print(f"URL confirmed as PDF: {url}")
        return is_pdf
    except requests.exceptions.Timeout:
        if verbose:
            print(f"Timeout checking PDF URL {url} - will try direct download instead")
        return False
    except Exception as e:
        if verbose:
            print(f"Error checking PDF URL {url}: {e}")
        return False

def extract_doi_from_pdf_url(pdf_url, verbose=False):
    """Download PDF and extract DOI."""
    if not pdf_url or not PDF_PARSING_AVAILABLE:
        return None

    # A comma-separated value: try each member (skipping SSRN) until one
    # yields a DOI.
    if ',' in pdf_url:
        for candidate in pdf_url.split(','):
            if 'papers.ssrn.com' in candidate:
                continue
            doi = extract_doi_from_pdf_url(candidate.strip(), verbose)
            if doi:
                return doi
        return None

    # Skip SSRN URLs
    if 'papers.ssrn.com' in pdf_url:
        if verbose:
            print(f"Skipping SSRN URL as it may be inaccessible: {pdf_url}")
        return None

    # Resolve archive.org wrappers first.
    pdf_url = extract_actual_url_from_wayback(pdf_url, verbose)

    if verbose:
        print(f"Attempting to extract DOI from PDF: {pdf_url}")

    # The URL itself may already carry the DOI — no download needed then.
    doi_in_url = extract_doi_from_url(pdf_url)
    if doi_in_url:
        if verbose:
            print(f"Found DOI in PDF URL: {doi_in_url}")
        return doi_in_url

    try:
        # Fetch the PDF body.
        session = create_session_with_retries()
        response = session.get(pdf_url, stream=True, timeout=30, verify=False)
        response.raise_for_status()

        # Bail out when the server clearly did not return a PDF.
        content_type = response.headers.get('Content-Type', '').lower()
        if 'application/pdf' not in content_type and not pdf_url.lower().endswith('.pdf'):
            if verbose:
                print(f"URL does not point to a PDF: {pdf_url} (Content-Type: {content_type})")
            return None

        pdf_buffer = io.BytesIO(response.content)

        # First attempt: PyPDF2 over the first few pages (DOIs usually appear early).
        try:
            reader = PyPDF2.PdfReader(pdf_buffer)
            text = ""
            for page_index in range(min(3, len(reader.pages))):
                text += reader.pages[page_index].extract_text() + " "

            if verbose:
                print(f"Extracted text from PDF using PyPDF2 (first 200 chars): {text[:200]}")

            doi = extract_doi_from_text(text)
            if doi:
                if verbose:
                    print(f"Found DOI in PDF using PyPDF2: {doi}")
                return doi
        except Exception as e:
            if verbose:
                print(f"PyPDF2 extraction failed: {e}")

        # Second attempt: pdfminer over the same (rewound) buffer.
        try:
            pdf_buffer.seek(0)
            text = pdf_extract_text(pdf_buffer)

            if verbose:
                print(f"Extracted text from PDF using pdfminer (first 200 chars): {text[:200]}")

            doi = extract_doi_from_text(text)
            if doi:
                if verbose:
                    print(f"Found DOI in PDF using pdfminer: {doi}")
                return doi
        except Exception as e:
            if verbose:
                print(f"pdfminer extraction failed: {e}")

    except Exception as e:
        if verbose:
            print(f"Error extracting DOI from PDF {pdf_url}: {e}")

    return None

def extract_doi_from_website(url, verbose=False):
    """Extract DOI from a website by visiting the URL.

    Tries, in order: DOI embedded in the URL path, DOI in the final
    (post-redirect) URL, citation meta tags, an explicit "doi:" marker in
    the page text, doi.org links, data-doi attributes, DOI-labelled
    containers, and finally a generic DOI pattern over the full page text.

    Args:
        url: Page URL to fetch and inspect.
        verbose: Print progress details.

    Returns:
        DOI string or None.
    """
    if not url:
        return None

    # Skip SSRN URLs and Google Books
    if 'papers.ssrn.com' in url or 'books.google.com' in url:
        if verbose:
            print(f"Skipping URL as it may be inaccessible or not useful: {url}")
        return None

    # Extract actual URL if it's an archive.org URL
    url = extract_actual_url_from_wayback(url, verbose)

    if verbose:
        print(f"Attempting to extract DOI from website: {url}")

    # Extract DOI from URL if it contains one (e.g., ACM Digital Library URLs often contain DOIs)
    if '/doi/' in url:
        doi_from_url = extract_doi_from_url(url)
        if doi_from_url:
            if verbose:
                print(f"Found DOI in URL path: {doi_from_url}")
            return doi_from_url

    try:
        # Browser-like headers: some publishers block default requests UA.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'DNT': '1',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cache-Control': 'max-age=0'
        }

        session = create_session_with_retries()
        response = session.get(url, headers=headers, timeout=30, verify=False)
        response.raise_for_status()

        # Get the final URL after redirects
        final_url = response.url

        if verbose:
            print(f"Final URL after redirects: {final_url}")

        # Check if the final URL contains a DOI
        doi_from_url = extract_doi_from_url(final_url)
        if doi_from_url:
            if verbose:
                print(f"Found DOI in final URL: {doi_from_url}")
            return doi_from_url

        # Parse the HTML content
        soup = BeautifulSoup(response.text, 'html.parser')

        # Method 1: Look for meta tags with DOI information.
        # (Duplicate 'dc.identifier' entry removed; locals renamed so the
        # 'property' builtin is no longer shadowed.)
        for tag in soup.find_all('meta'):
            tag_name = tag.get('name', '').lower()
            tag_property = tag.get('property', '').lower()

            if tag_name in ['citation_doi', 'dc.identifier', 'dc.identifier.doi', 'doi'] or tag_property in ['og:doi']:
                content = tag.get('content')
                if content and re.search(r'10\.\d{4,9}/.+', content):
                    if verbose:
                        print(f"Found DOI in meta tag: {content}")
                    return content.strip()

        # Method 2: Look for an explicit "doi:" marker in the page text
        page_text = soup.get_text()
        doi_pattern = r'(?:doi|DOI):\s*(10\.\d{4,9}/[-._;()/:A-Z0-9]+)'
        match = re.search(doi_pattern, page_text)
        if match:
            if verbose:
                print(f"Found DOI in text: {match.group(1)}")
            return match.group(1).strip()

        # Method 3: Look for links to doi.org
        for link in soup.find_all('a', href=True):
            href = link['href']
            if 'doi.org' in href:
                doi = href.split('doi.org/')[-1]
                if re.match(r'10\.\d{4,9}/.+', doi):
                    if verbose:
                        print(f"Found DOI in link to doi.org: {doi}")
                    return doi.strip()

        # Method 4: Look for data-doi attributes
        for element in soup.find_all(attrs={"data-doi": True}):
            doi = element.get('data-doi')
            if doi and re.match(r'10\.\d{4,9}/.+', doi):
                if verbose:
                    print(f"Found DOI in data-doi attribute: {doi}")
                return doi.strip()

        # Method 5: Look for specific elements that often contain DOIs.
        # Use a separate variable for the container text: the original code
        # reassigned `text` here, so Method 6 scanned only the last
        # container's text instead of the whole page.
        doi_containers = soup.find_all(['div', 'span', 'p'], class_=lambda c: c and ('doi' in c.lower() or 'identifier' in c.lower()))
        for container in doi_containers:
            container_text = container.get_text()
            doi = extract_doi_from_text(container_text)
            if doi:
                if verbose:
                    print(f"Found DOI in specific element: {doi}")
                return doi

        # Method 6: Generic DOI pattern over the full page text
        generic_doi = extract_doi_from_text(page_text)
        if generic_doi:
            if verbose:
                print(f"Found DOI using generic pattern: {generic_doi}")
            return generic_doi.strip()

    except Exception as e:
        if verbose:
            print(f"Error extracting DOI from website {url}: {e}")

    return None

def try_multiple_doi_extraction_methods(paper, verbose=False):
    """Try multiple methods to extract DOI from a paper."""
    if verbose:
        print(f"\nAttempting to extract DOI for paper: {paper['title']}")

    # Method 1: the paper record may already carry a DOI.
    known_doi = paper.get('doi')
    if known_doi:
        if verbose:
            print(f"DOI already available: {known_doi}")
        return known_doi

    site_url = paper['website_url']

    # Method 2: DOI embedded directly in the website URL path.
    if site_url and '/doi/' in site_url:
        doi = extract_doi_from_url(site_url)
        if doi:
            if verbose:
                print(f"Extracted DOI from website URL: {doi}")
            return doi

    # Method 3: DOI mentioned in the abstract text.
    if paper['abstract']:
        doi = extract_doi_from_text(paper['abstract'])
        if doi:
            if verbose:
                print(f"Extracted DOI from abstract: {doi}")
            return doi

    # Method 4: DOI inside the linked PDF (only when the link really is a PDF).
    pdf_link = paper['download_url']
    if pdf_link and check_pdf_url(pdf_link, verbose):
        doi = extract_doi_from_pdf_url(pdf_link, verbose)
        if doi:
            if verbose:
                print(f"Extracted DOI from PDF: {doi}")
            return doi

    # Method 5: scrape the landing page itself.
    if site_url:
        if verbose:
            print(f"Attempting to extract DOI from website content: {site_url}")
        doi = extract_doi_from_website(site_url, verbose)
        if doi:
            if verbose:
                print(f"Extracted DOI from website content: {doi}")
            return doi

    if verbose:
        print("Failed to extract DOI using all methods")

    return None

def process_search_result(result):
    """Process a search result to extract relevant information."""
    paper = {
        'id': result.get('result_id', ''),
        'title': result.get('title', 'Unknown Title'),
        'website_url': '',
        'download_url': '',
        'abstract': result.get('snippet', ''),
        'doi': None,
        'citations': 0,
        'authors': '',
        'publication_date': '',
        'journal': '',
        'citation_link': '',
        'author_year': ''  # New field for first author and year
    }

    # Normalize the landing-page URL (unwrap archive.org wrappers),
    # keeping the original around for reference.
    raw_link = result.get('link', '')
    paper['original_url'] = raw_link
    paper['website_url'] = extract_actual_url_from_wayback(raw_link)

    # Collect de-duplicated, cleaned PDF links from the result's resources.
    if 'resources' in result:
        pdf_links = []
        for resource in result['resources']:
            if resource.get('file_format', '').lower() != 'pdf':
                continue
            cleaned = extract_actual_url_from_wayback(resource.get('link', ''))
            if cleaned and cleaned not in pdf_links:
                pdf_links.append(cleaned)

        if pdf_links:
            paper['download_url'] = ','.join(pdf_links)

    # Parse "Authors - Journal, Date" out of the publication summary.
    if 'publication_info' in result:
        summary = result.get('publication_info', {}).get('summary', '')
        paper['publication_info'] = summary

        if summary:
            segments = summary.split(' - ')
            if len(segments) >= 2:
                paper['authors'] = segments[0]
                venue_bits = segments[1].split(', ')
                if len(venue_bits) >= 2:
                    paper['journal'] = venue_bits[0]
                    try:
                        paper['publication_date'] = venue_bits[1]
                    except IndexError:
                        pass

    # Citation count and related links.
    if 'inline_links' in result:
        cited_by = result.get('inline_links', {}).get('cited_by', {})
        if cited_by:
            paper['citations'] = cited_by.get('total', 0)
            paper['citation_link'] = cited_by.get('link', '')
            paper['citation_id'] = cited_by.get('cites_id', '')
            # Keep the SerpApi citation link for potential further scraping.
            if 'serpapi_scholar_link' in cited_by:
                paper['serpapi_citation_link'] = cited_by.get('serpapi_scholar_link', '')

    # Build the "First Author, YYYY" label.
    lead_author = ''
    if paper['authors']:
        lead_author = paper['authors'].split(',')[0].strip()

    year = ''
    if paper['publication_date']:
        year_hit = re.search(r'\b(19|20)\d{2}\b', paper['publication_date'])
        if year_hit:
            year = year_hit.group(0)

    if lead_author and year:
        paper['author_year'] = f"{lead_author}, {year}"
    elif lead_author:
        paper['author_year'] = lead_author
    elif year:
        paper['author_year'] = year

    return paper

def is_valid_pdf_file(file_path, verbose=False):
    """Check if a file is actually a valid PDF file."""
    if not os.path.exists(file_path):
        return False

    # Reject suspiciously small files first; real PDFs are rarely under 1KB.
    size = os.path.getsize(file_path)
    if size < 1000:
        if verbose:
            print(f"File is suspiciously small: {size} bytes")
        return False

    # Verify the %PDF- magic bytes at the start of the file.
    try:
        with open(file_path, 'rb') as handle:
            magic = handle.read(5)
            if magic != b'%PDF-':
                if verbose:
                    print(f"File doesn't have PDF signature, found: {magic}")
                # Peek further to report (verbosely) when we actually
                # downloaded an HTML page instead of a PDF.
                handle.seek(0)
                prefix = handle.read(1000).lower()
                if b'<!doctype html>' in prefix or b'<html' in prefix:
                    if verbose:
                        print(f"File appears to be HTML, not PDF")
                return False
    except Exception as e:
        if verbose:
            print(f"Error checking PDF file: {e}")
        return False

    # When PyPDF2 is available, confirm the file actually parses.
    if PDF_PARSING_AVAILABLE:
        try:
            with open(file_path, 'rb') as handle:
                try:
                    page_count = len(PyPDF2.PdfReader(handle).pages)
                    if verbose:
                        print(f"Valid PDF with {page_count} pages")
                    return True
                except Exception as e:
                    if verbose:
                        print(f"PyPDF2 couldn't parse the file: {e}")
                    return False
        except Exception as e:
            if verbose:
                print(f"Error opening file with PyPDF2: {e}")
            return False

    # Signature looked right and PyPDF2 isn't installed — assume valid.
    return True

def try_download_paper(paper: dict, download_dir: str = 'papers', verbose: bool = False) -> str:
    """Try to download a paper and return its status.

    Download strategy, in order:
      1. If either URL points at arXiv, convert it to a direct PDF URL and
         try that first.
      2. Otherwise try ``download_url`` (comma-separated lists are split,
         with PDF-looking URLs tried before the rest).
      3. Fall back to ``website_url``.

    SSRN / Google Books / Google Scholar URLs are skipped up front because
    they block or are useless for scripted downloads.

    Args:
        paper: Dict with at least 'title', 'download_url' and 'website_url'.
        download_dir: Directory where PDFs are saved (created if missing).
        verbose: Print progress/diagnostic messages.

    Returns:
        One of "downloaded", "inaccessible", "download failed" or "undo".
    """
    # Create download directory if it doesn't exist
    Path(download_dir).mkdir(exist_ok=True)
    
    status = "undo"
    
    # Skip SSRN URLs and Google-related URLs
    if paper['download_url']:
        if ('papers.ssrn.com' in paper['download_url'] or 
            'books.google.' in paper['download_url'] or 
            'scholar.google.' in paper['download_url']):
            if verbose:
                print(f"Skipping URL as it may be inaccessible or not useful: {paper['download_url']}")
            return "inaccessible"
    if paper['website_url']:
        if ('papers.ssrn.com' in paper['website_url'] or 
            'books.google.' in paper['website_url'] or 
            'scholar.google.' in paper['website_url']):
            if verbose:
                print(f"Skipping URL as it may be inaccessible or not useful: {paper['website_url']}")
            return "inaccessible"
    
    # Function to handle direct download attempts
    def attempt_download(url, filename):
        """Download `url` to `filename`; return True on (apparent) success.

        Note: success here only means a non-empty file was written -- the
        content is NOT validated as a real PDF (a Content-Type mismatch is
        only warned about). On any failure the partial file is removed.
        """
        try:
            # Skip SSRN and Google URLs
            if ('papers.ssrn.com' in url or 
                'books.google.' in url or 
                'scholar.google.' in url):
                if verbose:
                    print(f"Skipping URL as it may be inaccessible or not useful: {url}")
                return False
                
            # First, extract actual URL if it's an archive.org URL
            clean_url = extract_actual_url_from_wayback(url, verbose)
            
            # Convert to direct download URL if possible (e.g., arXiv)
            clean_url = convert_to_direct_download_url(clean_url, verbose)
            
            if verbose:
                if clean_url != url:
                    print(f"Using direct download URL: {clean_url}")
                print(f"Attempting direct download from: {clean_url}")
                
            # verify=False: some publisher hosts have broken cert chains;
            # the short timeout keeps dead hosts from stalling the batch.
            session = create_session_with_retries()
            response = session.get(clean_url, stream=True, timeout=5, verify=False)
            response.raise_for_status()
            
            # Check content type to warn about potential non-PDF downloads
            content_type = response.headers.get('Content-Type', '').lower()
            if 'application/pdf' not in content_type and 'application/octet-stream' not in content_type:
                if verbose:
                    print(f"Warning: Content-Type is not PDF: {content_type}")
            
            # Stream the response to disk in 8 KB chunks.
            with open(filename, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
            
            # Just check if file exists and is not empty
            if os.path.exists(filename) and os.path.getsize(filename) > 0:
                return True
            else:
                if verbose:
                    print(f"Downloaded file is empty or missing: (unknown)")
                # Remove the invalid file
                if os.path.exists(filename):
                    os.remove(filename)
                    if verbose:
                        print(f"Removed invalid file: (unknown)")
                return False
                
        except Exception as e:
            if verbose:
                print(f"Download attempt failed for {url}: {e}")
            # Clean up any partially downloaded file
            if os.path.exists(filename):
                os.remove(filename)
                if verbose:
                    print(f"Removed partial download: (unknown)")
            return False
    
    # Special handling for arXiv URLs - prioritize direct PDF download
    if ((paper['website_url'] and ('arxiv.org/abs/' in paper['website_url'] or 
                                 'doi.org/10.48550/arXiv.' in paper['website_url'])) or
        (paper['download_url'] and ('arxiv.org/abs/' in paper['download_url'] or 
                                   'doi.org/10.48550/arXiv.' in paper['download_url']))):
        if verbose:
            print(f"ArXiv URL detected, attempting direct PDF download")
        
        # Replace filesystem-unsafe characters and cap the filename length.
        sanitized_title = re.sub(r'[\\/*?:"<>|]', "_", paper['title'])
        filename = os.path.join(download_dir, f"{sanitized_title[:100]}.pdf")
        
        # NOTE(review): assumes download_url is always a string ('' is fine);
        # a None value would raise TypeError on the `in` check — confirm
        # upstream always supplies strings.
        url_to_use = paper['download_url'] if ('arxiv.org/abs/' in paper['download_url'] or 
                                              'doi.org/10.48550/arXiv.' in paper['download_url']) else paper['website_url']
        
        direct_url = convert_to_direct_download_url(url_to_use, verbose)
        if attempt_download(direct_url, filename):
            if verbose:
                print(f"Successfully downloaded arXiv paper to: (unknown)")
            return "downloaded"
        # If the arXiv shortcut failed, fall through to the generic logic.
    
    sanitized_title = re.sub(r'[\\/*?:"<>|]', "_", paper['title'])
    filename = os.path.join(download_dir, f"{sanitized_title[:100]}.pdf")
    
    # First try the download_url if available
    if paper['download_url']:
        # Handle multiple comma-separated URLs
        if ',' in paper['download_url']:
            urls = paper['download_url'].split(',')
            
            # First, try URLs that appear to be PDFs
            pdf_urls = [url.strip() for url in urls if url.strip().lower().endswith('.pdf') or '/pdf/' in url.strip().lower()]
            other_urls = [url.strip() for url in urls if url.strip() not in pdf_urls and url.strip()]
            
            # Filter out SSRN URLs
            pdf_urls = [url for url in pdf_urls if 'papers.ssrn.com' not in url]
            other_urls = [url for url in other_urls if 'papers.ssrn.com' not in url]
            
            if verbose and pdf_urls:
                print(f"Found {len(pdf_urls)} direct PDF URLs to try first")
            
            # Try PDF URLs first
            for pdf_url in pdf_urls:
                if verbose:
                    print(f"Trying direct PDF URL: {pdf_url}")
                
                if attempt_download(pdf_url, filename):
                    if verbose:
                        print(f"Successfully downloaded from PDF URL: {pdf_url} to: (unknown)")
                    return "downloaded"
            
            # Then try other URLs
            for other_url in other_urls:
                if verbose:
                    print(f"Trying non-PDF URL: {other_url}")
                
                if attempt_download(other_url, filename):
                    if verbose:
                        print(f"Successfully downloaded from URL: {other_url} to: (unknown)")
                    return "downloaded"
            
            # If all download_urls failed, try the website_url as a fallback
            if paper['website_url'] and not any(paper['website_url'] == url for url in urls) and 'papers.ssrn.com' not in paper['website_url']:
                if verbose:
                    print(f"All download URLs failed, trying website URL: {paper['website_url']}")
                
                if attempt_download(paper['website_url'], filename):
                    if verbose:
                        print(f"Successfully downloaded from website URL: {paper['website_url']} to: (unknown)")
                    return "downloaded"
            
            # If we're here, all URLs failed
            status = "download failed"
        else:
            # Single URL - check if it's a PDF
            is_likely_pdf = paper['download_url'].lower().endswith('.pdf') or '/pdf/' in paper['download_url'].lower()
            
            if is_likely_pdf and verbose:
                print(f"URL appears to be a direct PDF: {paper['download_url']}")
            
            if verbose:
                print(f"Attempting to download: {paper['download_url']}")
            
            # Directly attempt download
            if attempt_download(paper['download_url'], filename):
                if verbose:
                    print(f"Successfully downloaded to: (unknown)")
                return "downloaded"
            
            # If download_url failed, try website_url as a fallback
            if paper['website_url'] and paper['website_url'] != paper['download_url'] and 'papers.ssrn.com' not in paper['website_url']:
                if verbose:
                    print(f"Download URL failed, trying website URL: {paper['website_url']}")
                
                if attempt_download(paper['website_url'], filename):
                    if verbose:
                        print(f"Successfully downloaded from website URL: {paper['website_url']} to: (unknown)")
                    return "downloaded"
            
            status = "download failed"
    
    # If no download_url, try website_url directly
    elif paper['website_url'] and 'papers.ssrn.com' not in paper['website_url']:
        if verbose:
            print(f"No download URL, trying website URL directly: {paper['website_url']}")
        
        if attempt_download(paper['website_url'], filename):
            if verbose:
                print(f"Successfully downloaded from website URL: {paper['website_url']} to: (unknown)")
            return "downloaded"
        
        status = "download failed"
    else:
        if verbose:
            print(f"No download URL or website URL available for: {paper['title']}")
        status = "download failed"
        
    return status

def try_download_from_scihub(paper, download_dir='papers', verbose=False):
    """Try to download a paper from Sci-Hub using its DOI.

    Iterates over several Sci-Hub mirror domains. For each mirror the result
    page is scraped for a PDF link -- first the embedded ``<iframe id="pdf">``
    viewer, then the "⇣" download button some mirrors use instead. The PDF is
    saved under ``download_dir`` and validated; invalid downloads (mirrors
    often return an HTML error page) are deleted and the next mirror is tried.

    Args:
        paper: Dict with at least 'doi' and 'title' keys.
        download_dir: Directory where the PDF is saved (created if missing).
        verbose: Print progress/diagnostic messages.

    Returns:
        "downloaded" on success, "download failed" otherwise.
    """
    if not paper['doi']:
        if verbose:
            print(f"No DOI available for Sci-Hub download: {paper['title']}")
        return "download failed"

    def _fetch_and_save_pdf(session, pdf_url, headers):
        """Download pdf_url, save and validate it; return the filename or None.

        Removes the file and returns None when the payload is not a real PDF.
        May raise on network errors (handled by the per-domain caller).
        """
        # Sci-Hub often emits protocol-relative links ("//host/...").
        if pdf_url.startswith('//'):
            pdf_url = 'https:' + pdf_url

        pdf_response = session.get(pdf_url, headers=headers, stream=True, timeout=5, verify=False)
        pdf_response.raise_for_status()

        # Replace filesystem-unsafe characters and cap the filename length.
        sanitized_title = re.sub(r'[\\/*?:"<>|]', "_", paper['title'])
        filename = os.path.join(download_dir, f"{sanitized_title[:100]}.pdf")

        with open(filename, 'wb') as f:
            for chunk in pdf_response.iter_content(chunk_size=8192):
                f.write(chunk)

        # Verify the downloaded file is actually a PDF.
        if is_valid_pdf_file(filename, verbose):
            return filename

        if verbose:
            print(f"Downloaded file from Sci-Hub is not a valid PDF: {filename}")
        os.remove(filename)
        if verbose:
            print(f"Removed invalid file: {filename}")
        return None

    try:
        # Create download directory if it doesn't exist
        Path(download_dir).mkdir(exist_ok=True)

        # Multiple Sci-Hub domains to try
        scihub_domains = [
            "https://sci-hub.se/",
            "https://sci-hub.st/",
            "https://sci-hub.ru/",
            "https://sci-hub.ren/"
        ]

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        # Try each domain with a short timeout
        for domain in scihub_domains:
            scihub_url = f"{domain}{paper['doi']}"

            if verbose:
                print(f"Trying Sci-Hub domain: {scihub_url}")

            try:
                session = create_session_with_retries()
                response = session.get(scihub_url, headers=headers, timeout=5, verify=False)
                response.raise_for_status()

                soup = BeautifulSoup(response.text, 'html.parser')

                # Preferred source: the embedded PDF viewer iframe.
                pdf_url = None
                iframe = soup.find('iframe', id='pdf')
                if iframe and iframe.get('src'):
                    pdf_url = iframe['src']
                    if verbose:
                        print(f"Found PDF link on Sci-Hub: {pdf_url}")
                else:
                    # Fallback: the "⇣" download button used by some mirrors.
                    download_button = soup.find('a', string='⇣')
                    if download_button and download_button.get('href'):
                        pdf_url = download_button['href']
                        if verbose:
                            print(f"Found alternative PDF link on Sci-Hub: {pdf_url}")

                if pdf_url:
                    filename = _fetch_and_save_pdf(session, pdf_url, headers)
                    if filename:
                        if verbose:
                            print(f"Successfully downloaded from Sci-Hub to: {filename}")
                        return "downloaded"
                    # Invalid PDF: fall through and try the next domain.

            except Exception as e:
                if verbose:
                    print(f"Failed with domain {domain}: {e}")

        if verbose:
            print(f"Could not find PDF on any Sci-Hub domain for DOI: {paper['doi']}")
        return "download failed"

    except Exception as e:
        if verbose:
            print(f"Failed to download from Sci-Hub for DOI {paper['doi']}: {e}")
        return "download failed"

def read_paper_info_csv(filename='data/paper_info.csv'):
    """Read paper information from a CSV file.

    The header row is parsed with the ``csv`` module to locate the
    ``website_url`` / ``download_url`` columns (the previous implementation
    split the raw line on commas, which breaks on quoted header fields),
    falling back to positional indices 2 and 3 when the columns are absent.

    Args:
        filename: Path of the CSV file to read.

    Returns:
        list[dict]: One dict per row with keys id, title, website_url,
        download_url, abstract, doi (always None here), authors, journal,
        publication_date, citations, citation_link and author_year. An empty
        list is returned when the file is missing or unreadable.
    """
    papers = []
    try:
        # newline='' is the documented way to open files for the csv module.
        with open(filename, 'r', newline='', encoding='utf-8') as f:
            reader = csv.reader(f)
            # Properly-parsed header (handles quoted fields containing commas).
            header = next(reader, [])
            website_url_idx = header.index('website_url') if 'website_url' in header else 2
            download_url_idx = header.index('download_url') if 'download_url' in header else 3

            for row in reader:
                # Ensure the row has enough elements for the URL columns.
                if len(row) > max(website_url_idx, download_url_idx):
                    paper = {
                        'id': row[0] if len(row) > 0 else '',
                        'title': row[1] if len(row) > 1 else 'Unknown Title',
                        'website_url': row[website_url_idx] if len(row) > website_url_idx else '',
                        'download_url': row[download_url_idx] if len(row) > download_url_idx else '',
                        'abstract': row[4] if len(row) > 4 else '',
                        'doi': None,
                        'authors': row[5] if len(row) > 5 else '',
                        'journal': row[6] if len(row) > 6 else '',
                        'publication_date': row[7] if len(row) > 7 else '',
                        'citations': row[8] if len(row) > 8 else 0,
                        'citation_link': row[9] if len(row) > 9 else '',
                        'author_year': row[10] if len(row) > 10 else ''
                    }
                    papers.append(paper)

        print(f"Read {len(papers)} papers from {filename}")
    except FileNotFoundError:
        print(f"File {filename} not found")
    except Exception as e:
        print(f"Error reading {filename}: {e}")
        # Discard any partially-read rows so the fallback below does not
        # duplicate them (the original appended on top of the partial list).
        papers = []
        # Try the old method as fallback
        try:
            with open(filename, 'r', newline='', encoding='utf-8') as f:
                reader = csv.DictReader(f)
                for row in reader:
                    paper = {
                        'id': row.get('id', ''),
                        'title': row.get('title', ''),
                        'website_url': row.get('website_url', ''),
                        'download_url': row.get('download_url', ''),
                        'abstract': row.get('abstract', ''),
                        'doi': None,
                        'authors': row.get('authors', ''),
                        'journal': row.get('journal', ''),
                        'publication_date': row.get('publication_date', ''),
                        'citations': row.get('citations', 0),
                        'citation_link': row.get('citation_link', ''),
                        'author_year': row.get('author_year', '')
                    }
                    papers.append(paper)
            print(f"Read {len(papers)} papers from {filename} using fallback method")
        except Exception as e2:
            print(f"Fallback method also failed: {e2}")

    return papers

def extract_and_write_dois(papers, output_file='data/paper_doi.csv', verbose=False):
    """Extract DOIs from papers and write them to a CSV file.

    For each paper this:
      1. checks once whether its download_url serves a reachable PDF
         (SSRN URLs are marked "inaccessible" without a network call),
      2. tries to pull a DOI straight out of the website_url path,
      3. falls back to try_multiple_doi_extraction_methods().

    The per-paper PDF status is computed once in the main loop and reused
    when writing the CSV; the previous implementation re-queried every
    download_url a second time while writing, doubling the network traffic.

    Args:
        papers: List of paper dicts (mutated in place: 'doi' is filled in).
        output_file: Path of the CSV to write (columns: paper_id,
            paper_title, doi, status).
        verbose: Print per-paper diagnostics.
    """
    # Create data directory if it doesn't exist
    Path(os.path.dirname(output_file)).mkdir(exist_ok=True)

    doi_count = 0
    pdf_exists_count = 0
    statuses = []  # per-paper status, index-aligned with `papers`

    print("Extracting DOIs from papers...")
    total_papers = len(papers)

    for i, paper in enumerate(papers):
        print(f"Processing paper {i+1}/{total_papers}: {paper['title'][:50]}...")

        # Check if PDF exists - use a short timeout for problematic URLs.
        status = "undo"
        try:
            pdf_exists = False
            if paper['download_url']:
                if 'papers.ssrn.com' in paper['download_url']:
                    # SSRN blocks scripted access; don't waste a request.
                    status = "inaccessible"
                    if verbose:
                        print(f"Skipping SSRN URL as it may be inaccessible: {paper['download_url']}")
                else:
                    pdf_exists = check_pdf_url(paper['download_url'], verbose, timeout=5)

            if pdf_exists:
                pdf_exists_count += 1
                status = "pdf exist"
                if verbose:
                    print(f"PDF exists for paper: {paper['title']}")
        except Exception as e:
            if verbose:
                print(f"Error checking if PDF exists: {e}")
        statuses.append(status)

        # Cheap path first: extract the DOI directly from the URL if present.
        if not paper.get('doi') and paper['website_url']:
            # Special case for DOI prefixes in URLs
            if '/doi/' in paper['website_url']:
                try:
                    doi_match = re.search(r'/doi/(?:abs|full|pdf)?/?(10\.\d{4,}[./][^/?&]+)', paper['website_url'])
                    if doi_match:
                        paper['doi'] = doi_match.group(1)
                        if verbose:
                            print(f"Extracted DOI from URL path: {paper['doi']}")
                except Exception as e:
                    if verbose:
                        print(f"Error extracting DOI from URL: {e}")

        # Try all remaining DOI extraction methods.
        if not paper.get('doi'):
            paper['doi'] = try_multiple_doi_extraction_methods(paper, verbose)

        # Count the successes
        if paper.get('doi'):
            doi_count += 1
            if verbose:
                print(f"Successfully extracted DOI: {paper['doi']}")
        else:
            if verbose:
                print(f"Failed to extract DOI for paper: {paper['title']}")

    # Write to CSV, reusing the statuses gathered above (no re-checking).
    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['paper_id', 'paper_title', 'doi', 'status'])
        for paper, status in zip(papers, statuses):
            writer.writerow([
                paper['id'],
                paper['title'],
                paper['doi'] or 'Unknown',
                status
            ])

    print(f"DOI information saved to {output_file}")

    # Print summary
    print(f"\nDOI Summary:")
    print(f"Total papers: {len(papers)}")
    print(f"Papers with DOI: {doi_count}")
    print(f"Papers without DOI: {len(papers) - doi_count}")
    print(f"Papers with PDF: {pdf_exists_count}")

def filter_redundant_papers(papers):
    """Remove duplicate papers, keeping the first occurrence of each title.

    Titles are compared case-insensitively after stripping surrounding
    whitespace; a smarter variant could use fuzzy string similarity.

    Args:
        papers: List of paper dicts, each with a 'title' key.

    Returns:
        list: The de-duplicated papers, in original order.
    """
    seen = {}
    duplicates = 0

    for entry in papers:
        # Normalized title acts as the deduplication key.
        key = entry['title'].strip().lower()
        if key in seen:
            duplicates += 1
        else:
            seen[key] = entry

    kept = list(seen.values())

    print(f"Filtered out {duplicates} redundant papers")
    print(f"Remaining unique papers: {len(kept)}")

    return kept

def write_filtered_papers_to_csv(papers, output_file='data/paper_info_filter.csv'):
    """Write filtered papers to a CSV file.

    Args:
        papers: List of paper dicts; the first five keys are required,
            the rest are filled with defaults when missing.
        output_file: Destination path; its parent directory is created
            if it does not already exist.
    """
    # Make sure the target directory exists before opening the file.
    Path(os.path.dirname(output_file)).mkdir(exist_ok=True)

    header = ['id', 'title', 'website_url', 'download_url', 'abstract', 'authors', 'journal', 'publication_date', 'citations', 'citation_link', 'author_year']

    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        # One row per paper; optional fields default to '' (citations to 0).
        writer.writerows(
            [
                p['id'],
                p['title'],
                p['website_url'],
                p['download_url'],
                p['abstract'],
                p.get('authors', ''),
                p.get('journal', ''),
                p.get('publication_date', ''),
                p.get('citations', 0),
                p.get('citation_link', ''),
                p.get('author_year', '')
            ]
            for p in papers
        )

    print(f"Filtered paper information saved to {output_file}")

def validate_downloaded_pdfs(download_dir='papers', cache_file='data/pdf_validation_cache.json', verbose=False):
    """Check all downloaded PDF files and record their validation status.

    Results are cached in a JSON file keyed by filename; a file is only
    re-validated when its mtime is newer than the cached check time.
    At the end the user is interactively offered (via input()) the option
    to delete invalid files -- this function is not suitable for
    unattended/batch use.

    Args:
        download_dir: Directory containing the downloaded *.pdf files.
        cache_file: JSON cache path; each entry stores is_valid,
            last_checked (epoch seconds) and file_size.
        verbose: Print per-file progress.

    Returns:
        (valid_pdfs, invalid_pdfs) lists of filenames, or None on the
        early exits (missing directory / no PDFs).
        NOTE(review): the None-vs-tuple return is inconsistent; callers
        that unpack the result will crash on the early exits -- confirm
        call sites before relying on the return value.
    """
    if not os.path.exists(download_dir):
        print(f"Download directory {download_dir} does not exist")
        return
    
    # Create data directory if it doesn't exist
    Path(os.path.dirname(cache_file)).mkdir(exist_ok=True)
    
    # Load existing cache if it exists; a corrupt cache is silently reset.
    cache = {}
    if os.path.exists(cache_file):
        try:
            with open(cache_file, 'r', encoding='utf-8') as f:
                cache = json.load(f)
        except Exception as e:
            if verbose:
                print(f"Error loading cache file: {e}")
            cache = {}
    
    # Get all PDF files (by extension only; content is validated below).
    pdf_files = [f for f in os.listdir(download_dir) if f.lower().endswith('.pdf')]
    
    if not pdf_files:
        print(f"No PDF files found in {download_dir}")
        return
    
    print(f"Checking {len(pdf_files)} PDF files...")
    
    # Check each file
    valid_pdfs = []
    invalid_pdfs = []
    
    for pdf_file in pdf_files:
        file_path = os.path.join(download_dir, pdf_file)
        
        # Check file modification time
        mod_time = os.path.getmtime(file_path)
        
        # Skip (reuse the cached verdict) if file hasn't changed since last check.
        if pdf_file in cache and cache[pdf_file]['last_checked'] >= mod_time:
            if verbose:
                print(f"Skipping unchanged file: {pdf_file}")
            
            if cache[pdf_file]['is_valid']:
                valid_pdfs.append(pdf_file)
            else:
                invalid_pdfs.append(pdf_file)
            continue
        
        if verbose:
            print(f"Checking file: {pdf_file}")
            
        is_valid = is_valid_pdf_file(file_path, verbose)
        
        # Update cache with the fresh verdict and current timestamp/size.
        cache[pdf_file] = {
            'is_valid': is_valid,
            'last_checked': time.time(),
            'file_size': os.path.getsize(file_path)
        }
        
        if is_valid:
            valid_pdfs.append(pdf_file)
        else:
            invalid_pdfs.append(pdf_file)
    
    # Save cache (rewritten in full on every run)
    with open(cache_file, 'w', encoding='utf-8') as f:
        json.dump(cache, f, indent=2)
    
    # Report results
    print(f"\nPDF Validation Summary:")
    print(f"Total files checked: {len(pdf_files)}")
    print(f"Valid PDFs: {len(valid_pdfs)}")
    print(f"Invalid PDFs: {len(invalid_pdfs)}")
    
    if invalid_pdfs:
        print("\nInvalid PDF files:")
        for pdf in invalid_pdfs:
            print(f"  - {pdf}")
        
        # Offer to delete invalid PDFs (interactive prompt; blocks on stdin).
        if input("\nWould you like to delete invalid PDF files? (y/n): ").lower() == 'y':
            deleted_count = 0
            for pdf in invalid_pdfs:
                try:
                    os.remove(os.path.join(download_dir, pdf))
                    deleted_count += 1
                except Exception as e:
                    print(f"Error deleting {pdf}: {e}")
            
            print(f"Deleted {deleted_count} invalid PDF files")
    
    return valid_pdfs, invalid_pdfs

def list_failed_downloads(status_file='data/paper_status.csv', verbose=False):
    """List papers that failed to download for retry.

    Reads the status CSV (columns: paper_id, paper_title, DOI, status),
    prints DOI/failure statistics, lists the failed papers, and writes
    them to retry_downloads.csv next to the status file.

    Fixes a ZeroDivisionError in the original code: the percentage lines
    were printed before checking that the file contained any papers or
    any failures, so an empty or all-successful status file crashed.

    Args:
        status_file: Path to the paper status CSV.
        verbose: Unused; kept for interface compatibility with callers.
    """
    if not os.path.exists(status_file):
        print(f"Status file {status_file} not found")
        return

    # Index papers by their ID; later rows for the same ID win.
    papers = {}
    with open(status_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            papers[row.get('paper_id', '')] = {
                'title': row.get('paper_title', ''),
                'doi': row.get('DOI', ''),
                'status': row.get('status', '')
            }

    # Classify papers for the statistics below.
    failed_papers = [p for p in papers.values() if p['status'] == 'download failed']
    unknown_doi_papers = [p for p in papers.values()
                          if p['doi'] == 'Unknown' or not p['doi']]
    failed_with_doi = [p for p in failed_papers
                       if p['doi'] != 'Unknown' and p['doi']]
    failed_without_doi = [p for p in failed_papers
                          if p['doi'] == 'Unknown' or not p['doi']]

    # Print statistics, guarding every division against empty denominators.
    total = len(papers)
    print("\nDOI Statistics:")
    print(f"Total papers: {total}")
    if total:
        print(f"Papers with unknown DOI: {len(unknown_doi_papers)} ({len(unknown_doi_papers)/total*100:.1f}%)")
        print(f"Papers that failed to download: {len(failed_papers)} ({len(failed_papers)/total*100:.1f}%)")
    if failed_papers:
        print(f"  - Failed with known DOI: {len(failed_with_doi)} ({len(failed_with_doi)/len(failed_papers)*100:.1f}% of failures)")
        print(f"  - Failed with unknown DOI: {len(failed_without_doi)} ({len(failed_without_doi)/len(failed_papers)*100:.1f}% of failures)")

    if not failed_papers:
        print("\nNo failed downloads found.")
        return

    # Print failed papers in a fixed-width table.
    print(f"\nFound {len(failed_papers)} papers that failed to download:\n")
    print(f"{'Title':<80} {'DOI':<30}")
    print(f"{'-' * 80} {'-' * 30}")

    for paper in failed_papers:
        title = paper['title']
        doi = paper['doi']

        # Truncate long titles so the table stays aligned.
        if len(title) > 77:
            title = title[:77] + "..."

        print(f"{title:<80} {doi:<30}")

    # Save the failed papers to a CSV for easy retry.
    retry_file = os.path.join(os.path.dirname(status_file), 'retry_downloads.csv')
    with open(retry_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['paper_title', 'DOI'])
        for paper in failed_papers:
            writer.writerow([paper['title'], paper['doi']])

    print(f"\nRetry information saved to {retry_file}")
    print("\nYou can manually download these papers or use other tools to fetch them.")

def search_arxiv_by_title(title, verbose=False):
    """Search arxiv.org for a paper by title and return its PDF URL if found.

    Scrapes the arXiv HTML search-results page, scores every result against
    the requested title with a word-overlap similarity measure, and returns
    the PDF URL of the best-scoring match above a 0.6 threshold.

    Args:
        title: The paper title to search for.
        verbose: If True, print progress and debug information.

    Returns:
        The PDF URL string for the best match, or None if no suitable match
        was found or a network/parsing error occurred.
    """
    if not title:
        return None

    if verbose:
        print(f"Searching arXiv for: {title}")

    # Clean up the title for the search query
    cleaned_title = title.replace(':', ' ').replace('-', ' ').replace('/', ' ')
    query = '+'.join(cleaned_title.split())
    search_url = f"https://arxiv.org/search/?query={query}&searchtype=title"

    def title_similarity(title1, title2):
        """Word-overlap similarity in [0, 1] between two titles."""
        if not title1 or not title2:
            return 0
        words1 = set(title1.lower().split())
        words2 = set(title2.lower().split())
        if not words1 or not words2:
            return 0
        return len(words1.intersection(words2)) / max(len(words1), len(words2))

    def extract_arxiv_id(result, title_element):
        """Extract the arXiv identifier from one search-result element, or None."""
        # Method 1: the link explicitly titled 'Abstract'
        link_element = result.find('a', {'href': True, 'title': 'Abstract'})
        if not link_element:
            # Method 2: any link whose href contains '/abs/'
            links = result.find_all('a', href=lambda href: href and '/abs/' in href)
            if links:
                link_element = links[0]
            else:
                # Method 3: a link inside the title element itself
                title_links = title_element.find_all('a', href=True)
                if title_links:
                    link_element = title_links[0]

        if not link_element or not link_element.get('href'):
            return None

        href = link_element['href']
        if verbose:
            print(f"Found link with href: {href}")

        # Primary extraction: new-style IDs such as 2101.01234 or 2101.01234v2
        match = re.search(r'/abs/([0-9]{4}\.[0-9]{4,5}(?:v[0-9]+)?)', href)
        if match:
            arxiv_id = match.group(1)
            if verbose:
                print(f"Extracted arXiv ID: {arxiv_id}")
            return arxiv_id

        # Fallback: split on '/abs/' and validate the remainder manually
        parts = href.split('/abs/')
        if len(parts) > 1:
            possible_id = parts[1].split('v')[0]
            if re.match(r'[0-9]{4}\.[0-9]{4,5}', possible_id):
                if verbose:
                    print(f"Extracted arXiv ID using alternative method: {possible_id}")
                return possible_id
        return None

    try:
        if verbose:
            print(f"Searching with URL: {search_url}")

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        session = create_session_with_retries()
        response = session.get(search_url, headers=headers, timeout=30)
        response.raise_for_status()

        # Parse the HTML response
        soup = BeautifulSoup(response.text, 'html.parser')

        # Find search results
        results = soup.find_all('li', class_='arxiv-result')

        if not results:
            if verbose:
                print("No results found on arXiv")
            return None

        if verbose:
            print(f"Found {len(results)} potential matches on arXiv")

        best_match = None
        best_similarity = 0

        for result in results:
            # Extract the title
            title_element = result.find('p', class_='title')
            if not title_element:
                continue

            result_title = title_element.text.strip()
            similarity = title_similarity(title, result_title)

            if verbose:
                print(f"Comparing: '{title}' with '{result_title}' - Similarity: {similarity:.2f}")

            # 0.6 is the threshold for treating a result as a genuine match
            if similarity > best_similarity and similarity > 0.6:
                arxiv_id = extract_arxiv_id(result, title_element)
                if arxiv_id:
                    # Only commit the candidate (and raise the similarity bar)
                    # once an ID was actually extracted.  Previously a result
                    # with an unextractable link raised best_similarity and
                    # silently blocked later, extractable matches.
                    best_similarity = similarity
                    best_match = {
                        'title': result_title,
                        'id': arxiv_id,
                        'similarity': similarity,
                        'pdf_url': f"https://arxiv.org/pdf/{arxiv_id}.pdf"
                    }
                elif verbose:
                    print(f"Could not find abstract link for paper: {result_title}")

        if best_match:
            if verbose:
                print(f"Best match: {best_match['title']} (Similarity: {best_match['similarity']:.2f})")
                print(f"arXiv ID: {best_match['id']}")
                print(f"PDF URL: {best_match['pdf_url']}")
            return best_match['pdf_url']

        if verbose:
            print(f"Could not extract arXiv ID for any matching papers")
        return None

    except Exception as e:
        # Network/parsing failures are non-fatal: the caller simply treats
        # the paper as unavailable on arXiv.
        if verbose:
            print(f"Error searching arXiv: {e}")
        return None

def try_arxiv_for_failed_papers(status_file='data/paper_status.csv', download_dir='papers', verbose=False):
    """Try to download papers that failed to download by searching arXiv.

    Reads the status CSV, collects rows whose status is 'download failed',
    searches arXiv for each paper by title, and downloads any matching PDF
    into download_dir. Papers fetched successfully have their status updated
    to 'downloaded'; if anything succeeded, all PDFs are re-validated.

    Args:
        status_file: CSV with paper_id, paper_title, DOI and status columns.
        download_dir: Directory where PDFs are saved (created if missing).
        verbose: If True, print extra debug information.
    """
    if not os.path.exists(status_file):
        print(f"Status file {status_file} not found")
        return

    # Create download directory if it doesn't exist
    Path(download_dir).mkdir(exist_ok=True)

    # Collect papers marked as failed in the status file
    failed_papers = []
    with open(status_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            if row.get('status') == 'download failed':
                failed_papers.append({
                    'id': row.get('paper_id', ''),
                    'title': row.get('paper_title', ''),
                    'doi': row.get('DOI', '')
                })

    if not failed_papers:
        print("No failed downloads found to retry")
        return

    print(f"Found {len(failed_papers)} papers that failed to download. Searching arXiv...")

    # Track success and failures
    success_count = 0
    still_failed = []

    # Try to download each paper from arXiv
    for i, paper in enumerate(failed_papers):
        print(f"Processing paper {i+1}/{len(failed_papers)}: {paper['title'][:50]}...")

        # Search arXiv for the paper
        pdf_url = search_arxiv_by_title(paper['title'], verbose)

        if not pdf_url:
            print(f"Paper not found on arXiv")
            still_failed.append(paper)
            continue

        print(f"Found on arXiv: {pdf_url}")

        # Build a filesystem-safe filename from the paper title
        sanitized_title = re.sub(r'[\\/*?:"<>|]', "_", paper['title'])
        filename = os.path.join(download_dir, f"{sanitized_title[:100]}.pdf")

        try:
            session = create_session_with_retries()
            # 30s timeout matches the other network calls in this script;
            # the previous 5s limit aborted many large PDF downloads.
            response = session.get(pdf_url, stream=True, timeout=30)
            response.raise_for_status()

            with open(filename, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)

            # Verify the downloaded file is a valid PDF
            if is_valid_pdf_file(filename, verbose):
                # Report the actual destination path (was a "(unknown)"
                # placeholder before).
                print(f"Successfully downloaded from arXiv to: {filename}")
                success_count += 1

                # Update status in the status file
                update_paper_status(paper['id'], 'downloaded')
            else:
                print(f"Downloaded file from arXiv is not a valid PDF: {filename}")
                os.remove(filename)
                still_failed.append(paper)

        except Exception as e:
            print(f"Failed to download from arXiv: {e}")
            # Clean up any partially downloaded file
            if os.path.exists(filename):
                os.remove(filename)
            still_failed.append(paper)

    # Print summary
    print("\narXiv Download Summary:")
    print(f"Total papers attempted: {len(failed_papers)}")
    print(f"Successfully downloaded: {success_count}")
    print(f"Still failed: {len(still_failed)}")

    if success_count > 0:
        print("\nValidating all downloaded PDF files...")
        validate_downloaded_pdfs(verbose=verbose)

def update_paper_status(paper_id, new_status, status_file='data/paper_status.csv'):
    """Update the status of a paper in the status CSV file.

    Rewrites the whole file, changing the 'status' column for every row whose
    'paper_id' matches.

    Args:
        paper_id: ID of the paper whose status should change.
        new_status: New status string to store.
        status_file: Path to the status CSV.

    Returns:
        True if the file was rewritten, False if it does not exist.
    """
    if not os.path.exists(status_file):
        return False

    # Read all records, updating the matching row(s) in memory
    records = []
    with open(status_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        # Preserve whatever columns the file actually has; hard-coding the
        # column list would raise ValueError on files with extra columns.
        fieldnames = reader.fieldnames or ['paper_id', 'paper_title', 'DOI', 'status']
        for row in reader:
            if row.get('paper_id') == paper_id:
                row['status'] = new_status
            records.append(row)

    # Write back all records
    with open(status_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(records)

    return True

def query_download_status(status_file='data/paper_status.csv', info_file='data/paper_info.csv', download_dir='papers', verbose=False):
    """Query download status and print information about failed downloads classified by source website.

    Reads the per-paper status CSV and the paper-info CSV, prints a summary of
    download statuses, lists papers whose status is 'download failed' grouped
    by source site (IEEE, Springer, ScienceDirect, ACM, ResearchGate, arXiv,
    other), and writes the classification to failed_downloads_by_source.csv
    next to the status file.

    Args:
        status_file: CSV with paper_id, paper_title, DOI and status columns.
        info_file: CSV with id, title, website_url and download_url columns.
        download_dir: Directory holding downloaded PDFs (created if missing).
        verbose: Unused here; kept for interface consistency with siblings.
    """
    if not os.path.exists(status_file):
        print(f"Status file {status_file} not found. No download information available.")
        return
    
    if not os.path.exists(info_file):
        print(f"Paper info file {info_file} not found. Cannot classify by source website.")
        return
    
    # Create download directory if it doesn't exist (to avoid errors)
    Path(download_dir).mkdir(exist_ok=True)
    
    # Read the status file: paper_id -> {title, status, doi}
    download_statuses = {}
    with open(status_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            paper_id = row.get('paper_id', '')
            status = row.get('status', '')
            download_statuses[paper_id] = {
                'title': row.get('paper_title', ''),
                'status': status,
                'doi': row.get('DOI', '')
            }
    
    # Read the paper info file to get URLs; column positions are resolved
    # from the header with fallbacks to the conventional layout
    # (id, title, website_url, download_url).
    paper_info = {}
    with open(info_file, 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)
        id_idx = header.index('id') if 'id' in header else 0
        title_idx = header.index('title') if 'title' in header else 1
        website_url_idx = header.index('website_url') if 'website_url' in header else 2
        download_url_idx = header.index('download_url') if 'download_url' in header else 3
        
        for row in reader:
            # Skip short/malformed rows that don't reach every needed column
            if len(row) > max(id_idx, website_url_idx, download_url_idx, title_idx):
                paper_id = row[id_idx]
                paper_info[paper_id] = {
                    'title': row[title_idx],
                    'website_url': row[website_url_idx],
                    'download_url': row[download_url_idx]
                }
    
    # Get list of downloaded PDF files (lowercased for case-insensitive match)
    pdf_files = [f.lower() for f in os.listdir(download_dir) if f.lower().endswith('.pdf')]
    
    # Count different statuses
    status_counts = {}
    for paper_id, info in download_statuses.items():
        status = info['status']
        if status not in status_counts:
            status_counts[status] = 0
        status_counts[status] += 1
    
    # Print summary
    print("\nDownload Status Summary:")
    print(f"Total papers tracked: {len(download_statuses)}")
    for status, count in status_counts.items():
        print(f"{status}: {count} papers")
    
    # Find papers that failed to download
    failed_papers = []
    for paper_id, status_info in download_statuses.items():
        if status_info['status'] == 'download failed':
            # Get paper title and sanitize it to check if it exists on disk.
            # Mirrors the sanitization used when downloading (replace
            # filesystem-unsafe characters, truncate to 100 chars).
            title = status_info['title']
            sanitized_title = re.sub(r'[\\/*?:"<>|]', "_", title)
            sanitized_title = sanitized_title[:100].lower() + ".pdf"
            
            # Check if this paper might already be downloaded despite status
            # NOTE(review): sanitized_title is already lowercase here, so the
            # extra .lower() calls below are redundant but harmless.
            if any(sanitized_title.lower() in pdf_file.lower() for pdf_file in pdf_files):
                # If file exists, update status in memory (we won't write back to CSV now)
                print(f"Note: Paper '{title[:50]}...' marked as failed but PDF exists - skipping")
                continue
            
            paper = {
                'id': paper_id,
                'title': title,
                'doi': status_info['doi'],
                'website_url': '',
                'download_url': ''
            }
            
            # Add URL information if available
            if paper_id in paper_info:
                paper['website_url'] = paper_info[paper_id]['website_url']
                paper['download_url'] = paper_info[paper_id]['download_url']
                
            failed_papers.append(paper)
    
    if not failed_papers:
        print("\nNo failed downloads found!")
        return
        
    # Classify papers by source website; 'other' is the catch-all bucket
    sources = {
        'ieee': {'name': 'IEEE Explore', 'domain': 'ieeexplore.ieee.org', 'papers': []},
        'springer': {'name': 'Springer', 'domain': 'link.springer.com', 'papers': []},
        'sciencedirect': {'name': 'ScienceDirect', 'domain': 'sciencedirect.com', 'papers': []},
        'acm': {'name': 'ACM Digital Library', 'domain': 'dl.acm.org', 'papers': []},
        'researchgate': {'name': 'ResearchGate', 'domain': 'researchgate.net', 'papers': []},
        'arxiv': {'name': 'arXiv', 'domain': 'arxiv.org', 'papers': []},
        'other': {'name': 'Other Sources', 'domain': '', 'papers': []}
    }
    
    # Classify each paper by substring match of the source domain in its URL
    for paper in failed_papers:
        classified = False
        
        # First try website_url
        url = paper['website_url']
        if not url:
            # If no website_url, try download_url
            url = paper['download_url']
        
        if not url:
            sources['other']['papers'].append(paper)
            continue
            
        # Check each source
        for source_key, source_info in sources.items():
            if source_key != 'other' and source_info['domain'] in url:
                sources[source_key]['papers'].append(paper)
                classified = True
                break
                
        if not classified:
            sources['other']['papers'].append(paper)
    
    # Output results by source
    total_failed = len(failed_papers)
    print(f"\nPapers failed to download ({total_failed}) by source:")
    
    for source_key, source_info in sources.items():
        papers = source_info['papers']
        if papers:
            print(f"\n@{source_info['domain'] if source_info['domain'] else 'unknown'} - {source_info['name']} ({len(papers)} papers):")
            print(f"{'Title':<80} {'URL'}")
            print(f"{'-' * 80} {'-' * 50}")
            
            for paper in papers:
                title = paper['title']
                # Truncate long titles for tabular display
                if len(title) > 77:
                    title = title[:77] + "..."
                
                # Use the most informative URL
                url = paper['website_url'] if paper['website_url'] else paper['download_url']
                    
                print(f"{title:<80} {url}")
    
    # Save to CSV by source
    output_file = os.path.join(os.path.dirname(status_file), 'failed_downloads_by_source.csv')
    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['source', 'paper_id', 'title', 'website_url', 'download_url', 'doi'])
        
        for source_key, source_info in sources.items():
            for paper in source_info['papers']:
                writer.writerow([
                    source_info['domain'] if source_info['domain'] else 'unknown',
                    paper['id'],
                    paper['title'],
                    paper['website_url'],
                    paper['download_url'],
                    paper['doi']
                ])
    
    print(f"\nDetailed classification saved to {output_file}")
    print("\nNote: To update status of papers that are marked as failed but actually downloaded,")
    print("run the script with --validate-pdfs option to refresh the status information.")

def list_pdf_fail_papers(status_file='data/paper_status.csv', info_file='data/paper_info.csv', verbose=False):
    """List papers that have PDF download links but failed to download.

    Cross-references the status CSV with the paper-info CSV, prints every
    paper whose status is 'download failed' yet has a non-empty download_url,
    and saves the list to pdf_fail_papers.csv next to the status file.

    Args:
        status_file: CSV with paper_id and status columns.
        info_file: CSV with id, title and download_url columns.
        verbose: Unused here; kept for interface consistency with siblings.
    """
    # Check if required files exist
    if not os.path.exists(status_file) or not os.path.exists(info_file):
        print(f"Required files not found: {status_file} and/or {info_file}")
        return

    # Read paper status: paper_id -> status
    status_dict = {}
    with open(status_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            paper_id = row.get('paper_id', '')
            status = row.get('status', '')
            if paper_id and status:
                status_dict[paper_id] = status

    # Read paper info to get download URLs
    papers_with_pdf_links = []
    with open(info_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.reader(f)
        headers = next(reader)
        # Resolve column positions from the header, falling back to the
        # conventional layout (id, title, website_url, download_url) —
        # same scheme as query_download_status.
        id_idx = headers.index('id') if 'id' in headers else 0
        download_url_idx = headers.index('download_url') if 'download_url' in headers else 3
        title_idx = headers.index('title') if 'title' in headers else 1

        for row in reader:
            if len(row) > max(id_idx, download_url_idx, title_idx):
                paper_id = row[id_idx]
                title = row[title_idx]
                download_url = row[download_url_idx]

                # Check if the paper has a PDF link but failed to download
                if download_url and status_dict.get(paper_id) == 'download failed':
                    papers_with_pdf_links.append({
                        'id': paper_id,
                        'title': title,
                        'download_url': download_url
                    })

    if not papers_with_pdf_links:
        print("No papers found with PDF links that failed to download.")
        return

    # Print results
    print(f"\nFound {len(papers_with_pdf_links)} papers with PDF links that failed to download:\n")
    print(f"{'Title':<80} {'Download URL'}")
    print(f"{'-' * 80} {'-' * 50}")

    for paper in papers_with_pdf_links:
        title = paper['title']
        url = paper['download_url']

        # Truncate long titles only
        if len(title) > 77:
            title = title[:77] + "..."

        print(f"{title:<80} {url}")

    # Save to CSV
    output_file = os.path.join(os.path.dirname(status_file), 'pdf_fail_papers.csv')
    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['paper_id', 'title', 'download_url'])
        for paper in papers_with_pdf_links:
            writer.writerow([paper['id'], paper['title'], paper['download_url']])

    print(f"\nList saved to {output_file}")

def list_acm_papers(status_file='data/paper_status.csv', info_file='data/paper_info.csv', verbose=False):
    """List papers from dl.acm.org that failed to download.

    Cross-references the status CSV with the paper-info CSV, prints every
    paper whose website_url is on dl.acm.org and whose status is
    'download failed', and saves the list to acm_fail_papers.csv next to
    the status file.

    Args:
        status_file: CSV with paper_id and status columns.
        info_file: CSV with id, title and website_url columns.
        verbose: Unused here; kept for interface consistency with siblings.
    """
    # Check if required files exist
    if not os.path.exists(status_file) or not os.path.exists(info_file):
        print(f"Required files not found: {status_file} and/or {info_file}")
        return

    # Read paper status: paper_id -> status
    status_dict = {}
    with open(status_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            paper_id = row.get('paper_id', '')
            status = row.get('status', '')
            if paper_id and status:
                status_dict[paper_id] = status

    # Read paper info to find ACM papers that failed
    acm_papers = []
    with open(info_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.reader(f)
        headers = next(reader)
        # Resolve column positions from the header, falling back to the
        # conventional layout (id, title, website_url, download_url) —
        # same scheme as query_download_status.
        id_idx = headers.index('id') if 'id' in headers else 0
        website_url_idx = headers.index('website_url') if 'website_url' in headers else 2
        title_idx = headers.index('title') if 'title' in headers else 1

        for row in reader:
            if len(row) > max(id_idx, website_url_idx, title_idx):
                paper_id = row[id_idx]
                title = row[title_idx]
                website_url = row[website_url_idx]

                # Check if the paper is from ACM and failed to download
                if 'dl.acm.org' in website_url and status_dict.get(paper_id) == 'download failed':
                    acm_papers.append({
                        'id': paper_id,
                        'title': title,
                        'website_url': website_url
                    })

    if not acm_papers:
        print("No papers found from dl.acm.org that failed to download.")
        return

    # Print results
    print(f"\nFound {len(acm_papers)} papers from dl.acm.org that failed to download:\n")
    print(f"{'Title':<80} {'ACM URL'}")
    print(f"{'-' * 80} {'-' * 50}")

    for paper in acm_papers:
        title = paper['title']
        url = paper['website_url']

        # Truncate long titles only
        if len(title) > 77:
            title = title[:77] + "..."

        print(f"{title:<80} {url}")

    # Save to CSV
    output_file = os.path.join(os.path.dirname(status_file), 'acm_fail_papers.csv')
    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['paper_id', 'title', 'website_url'])
        for paper in acm_papers:
            writer.writerow([paper['id'], paper['title'], paper['website_url']])

    print(f"\nList saved to {output_file}")

def extract_title_from_pdf(pdf_path, verbose=False):
    """Extract title from a PDF file."""
    if not PDF_PARSING_AVAILABLE:
        print("PDF parsing libraries not available. Cannot extract title.")
        return None
    
    if verbose:
        print(f"Attempting to extract title from PDF: {pdf_path}")
    
    # Check if it's a valid PDF
    if not is_valid_pdf_file(pdf_path, verbose):
        print(f"The file is not a valid PDF: {pdf_path}")
        return None
    
    title = None
    
    # Blacklist patterns for false positives
    blacklist_patterns = [
        r"see discussions?",
        r"https?://www\.researchgate\.net",
        r"this publication at",
        r"profiles? for this",
        r"author profiles",
        r"copyright\s+\d{4}",
        r"all rights reserved",
        r"^\d+$",  # Just a number
        r"^page \d+$",  # Just a page number
        r"^[0-9\-\.]+$",  # Just numbers and punctuation
        r"^proceedings of",
        r"^preprint\s+",
        r"^paper\s+\d+",
        r"^abstract\s*$",
        r"^--manuscript draft--$",  # Common in drafts
        r"^manuscript number",      # Manuscript metadata
        r"^article type",          # Article metadata
        r"^keywords",              # Keywords section
        r"^powered by",            # Publishing system info
        r"insert your title here",  # Common placeholder
        r"title of (your|the) paper", # Common placeholder
        r"your paper title",       # Common placeholder
        r"untitled",               # Common placeholder
        r"click here to edit",     # Common placeholder
    ]
    
    # Additional template placeholders that should be rejected
    template_titles = [
        "insert your title here",
        "your paper title",
        "title of your paper",
        "untitled document",
        "click here to edit title",
        "enter title here",
        "title goes here",
        "template",
    ]
    
    # Function to check if a string matches any blacklist pattern
    def is_blacklisted(text):
        if not text or len(text) < 5:
            return True
        text_lower = text.lower()
        
        # Check against exact template titles
        if text_lower in template_titles:
            return True
            
        # Check against regex patterns
        for pattern in blacklist_patterns:
            if re.search(pattern, text_lower):
                return True
        return False
    
    # Track all candidates even if we find metadata
    all_candidates = []
    
    # Try using PyPDF2 first
    try:
        with open(pdf_path, 'rb') as f:
            pdf_reader = PyPDF2.PdfReader(f)
            
            # Method 1: Try to get from document info/metadata
            if pdf_reader.metadata:
                meta_title = pdf_reader.metadata.get('/Title')
                if meta_title and isinstance(meta_title, str) and len(meta_title) > 5 and not is_blacklisted(meta_title):
                    if verbose:
                        print(f"Found title in PDF metadata: '{meta_title}'")
                    # Add as a candidate but don't immediately return - check content for better titles
                    all_candidates.append((meta_title, 15))  # High score but not highest
            
            # Method 2: Try first few pages with various heuristics
            candidates = []
            journal_title = None  # For storing potential journal title
            article_title = None  # For storing potential article title
            
            # Process first few pages for candidates
            for i in range(min(3, len(pdf_reader.pages))):
                text = pdf_reader.pages[i].extract_text()
                if not text:
                    continue
                
                # Split by lines and clean
                lines = [line.strip() for line in text.split('\n')]
                lines = [line for line in lines if line and len(line) > 10 and len(line) < 250]
                
                # Process first page differently - title usually at the top
                if i == 0 and lines:
                    # Check for multiline titles (top consecutive lines with similar formatting)
                    multiline_candidates = []
                    
                    # Process potential multiline titles
                    current_title_lines = []
                    for idx, line in enumerate(lines[:10]):  # Consider first 10 lines to find multiline titles
                        if not is_blacklisted(line):
                            # Similar formatting characteristics to detect consecutive title lines
                            is_title_like = (line.istitle() or line.isupper() or
                                            re.match(r'^[A-Z][^.!?]*[A-Za-z]$', line))
                            
                            if is_title_like:
                                current_title_lines.append(line)
                            elif current_title_lines:  # End of a title block
                                if len(current_title_lines) > 1:  # We found a multiline title
                                    # Merge multiline titles into a single line with separator
                                    merged_title = ' - '.join(current_title_lines)
                                    # If title is too long, keep first line and truncate
                                    if len(merged_title) > 120:
                                        merged_title = current_title_lines[0]
                                    # Higher score for earlier multiline titles and more lines
                                    multiline_score = 15 - idx + len(current_title_lines)
                                    multiline_candidates.append((merged_title, multiline_score))
                                current_title_lines = []
                    
                    # Don't forget to add the last set of title lines if we reach the end
                    if len(current_title_lines) > 1:
                        # Merge multiline titles into a single line with separator
                        merged_title = ' - '.join(current_title_lines)
                        # If title is too long, keep first line and truncate
                        if len(merged_title) > 120:
                            merged_title = current_title_lines[0]
                        multiline_score = 15 - len(lines[:10]) + len(current_title_lines)
                        multiline_candidates.append((merged_title, multiline_score))
                    
                    # Special handling for academic papers: first line might be journal name, second might be article title
                    top_lines = [line for line in lines[:5] if not is_blacklisted(line)]
                    if len(top_lines) >= 2:
                        # First non-blacklisted line might be journal title
                        journal_title = top_lines[0]
                        # Second non-blacklisted line might be article title
                        article_title = top_lines[1]
                        # If there's a third line that looks like article subtitle, include it
                        if len(top_lines) >= 3 and len(top_lines[2]) > 10:
                            # Check if the third line might be a continuation of the title
                            if not re.match(r'^(by|author|abstract|introduction|keywords)', top_lines[2].lower()):
                                article_title += " - " + top_lines[2]
                        
                        # Add combined journal + article title as a high-scoring candidate
                        combined_title = f"{journal_title} - {article_title}"
                        candidates.append((combined_title, 18))  # Highest score for combined title
                    
                    # Add individual top lines as candidates
                    for idx, line in enumerate(lines[:5]):
                        if not is_blacklisted(line):
                            # Higher score for earlier lines on first page
                            candidates.append((line, 10-idx))
                    
                    # Add multiline candidates with boosted scores
                    candidates.extend(multiline_candidates)
                
                # Look for specific title patterns
                for idx, line in enumerate(lines):
                    # Look for lines with capitalization patterns typical of titles
                    if re.match(r'^[A-Z][^.!?]*[A-Za-z]$', line) and not is_blacklisted(line):
                        # Title case or ALL CAPS are good indicators
                        if line.istitle() or line.isupper():
                            candidates.append((line, 8))
                        else:
                            candidates.append((line, 5))
                    
                    # Look for "Title:" pattern
                    title_pattern = re.search(r'(?:TITLE|Title):\s*([^\n]+)', line, re.IGNORECASE)
                    if title_pattern:
                        title_candidate = title_pattern.group(1).strip()
                        if not is_blacklisted(title_candidate):
                            candidates.append((title_candidate, 12))  # Higher score for explicit title
            
            # Look for title in submission text
            for idx, line in enumerate(lines):
                if "submit" in line.lower() and "entitled" in line.lower():
                    # This is likely a submission cover which might contain the title
                    title_match = re.search(r'entitled:?\s*"([^"]+)"', line, re.IGNORECASE)
                    if title_match:
                        submission_title = title_match.group(1).strip()
                        if not is_blacklisted(submission_title):
                            candidates.append((submission_title, 12))  # High score for explicit submission title
            
            # Add all content-based candidates
            all_candidates.extend(candidates)
                
    except Exception as e:
        if verbose:
            print(f"Error extracting title with PyPDF2: {e}")
    
    # If PyPDF2 fails or we want to double-check with pdfminer, try it
    try:
        with open(pdf_path, 'rb') as f:
            text = pdf_extract_text(f, maxpages=2)
            
            if not text:
                if verbose:
                    print("No text extracted with pdfminer")
            else:
                # Parse with same heuristics as above
                candidates = []
                lines = [line.strip() for line in text.split('\n')]
                lines = [line for line in lines if line and len(line) > 10 and len(line) < 250]
                
                # Check for multiline titles (top consecutive lines with similar formatting)
                multiline_candidates = []
                journal_title = None
                article_title = None
                
                # Process potential multiline titles
                current_title_lines = []
                for idx, line in enumerate(lines[:10]):  # Consider first 10 lines to find multiline titles
                    if not is_blacklisted(line):
                        # Similar formatting characteristics to detect consecutive title lines
                        is_title_like = (line.istitle() or line.isupper() or
                                        re.match(r'^[A-Z][^.!?]*[A-Za-z]$', line))
                        
                        if is_title_like:
                            current_title_lines.append(line)
                        elif current_title_lines:  # End of a title block
                            if len(current_title_lines) > 1:  # We found a multiline title
                                # Merge multiline titles into a single line with separator
                                merged_title = ' - '.join(current_title_lines)
                                # If title is too long, keep first line and truncate
                                if len(merged_title) > 120:
                                    merged_title = current_title_lines[0]
                                # Higher score for earlier multiline titles and more lines
                                multiline_score = 15 - idx + len(current_title_lines)
                                multiline_candidates.append((merged_title, multiline_score))
                            current_title_lines = []
                
                # Don't forget to add the last set of title lines if we reach the end
                if len(current_title_lines) > 1:
                    # Merge multiline titles into a single line with separator
                    merged_title = ' - '.join(current_title_lines)
                    # If title is too long, keep first line and truncate
                    if len(merged_title) > 120:
                        merged_title = current_title_lines[0]
                    multiline_score = 15 - len(lines[:10]) + len(current_title_lines)
                    multiline_candidates.append((merged_title, multiline_score))
                
                # Special handling for academic papers: first line might be journal name, second might be article title
                top_lines = [line for line in lines[:5] if not is_blacklisted(line)]
                if len(top_lines) >= 2:
                    # First non-blacklisted line might be journal title
                    journal_title = top_lines[0]
                    # Second non-blacklisted line might be article title
                    article_title = top_lines[1]
                    # If there's a third line that looks like article subtitle, include it
                    if len(top_lines) >= 3 and len(top_lines[2]) > 10:
                        # Check if the third line might be a continuation of the title
                        if not re.match(r'^(by|author|abstract|introduction|keywords)', top_lines[2].lower()):
                            article_title += " - " + top_lines[2]
                    
                    # Add combined journal + article title as a high-scoring candidate
                    combined_title = f"{journal_title} - {article_title}"
                    candidates.append((combined_title, 18))  # Highest score for combined title
                
                # First 5 lines are good candidates for titles
                for idx, line in enumerate(lines[:5]):
                    if not is_blacklisted(line):
                        candidates.append((line, 10-idx))
                
                # Add multiline candidates with boosted scores
                candidates.extend(multiline_candidates)
                
                # Look for title in submission text
                for idx, line in enumerate(lines):
                    if "submit" in line.lower() and "entitled" in line.lower():
                        # This is likely a submission cover which might contain the title
                        title_match = re.search(r'entitled:?\s*"([^"]+)"', line, re.IGNORECASE)
                        if title_match:
                            submission_title = title_match.group(1).strip()
                            if not is_blacklisted(submission_title):
                                candidates.append((submission_title, 12))  # High score for explicit submission title
                
                # Look for title patterns
                for line in lines:
                    # Title case or ALL CAPS are good indicators
                    if re.match(r'^[A-Z][^.!?]*[A-Za-z]$', line) and not is_blacklisted(line):
                        if line.istitle() or line.isupper():
                            candidates.append((line, 8))
                        else:
                            candidates.append((line, 5))
                    
                    # Look for "Title:" pattern
                    title_pattern = re.search(r'(?:TITLE|Title):\s*([^\n]+)', line, re.IGNORECASE)
                    if title_pattern:
                        title_candidate = title_pattern.group(1).strip()
                        if not is_blacklisted(title_candidate):
                            candidates.append((title_candidate, 12))  # Higher score for explicit title
                
                # Add pdfminer candidates
                all_candidates.extend(candidates)
                
    except Exception as e:
        if verbose:
            print(f"Error extracting title with pdfminer: {e}")
    
    # If we have candidates, pick the highest scored one
    if all_candidates:
        if verbose:
            print("All title candidates:")
            for i, (candidate, score) in enumerate(sorted(all_candidates, key=lambda x: (-x[1], len(x[0])))):
                print(f"  {i+1}. Score {score}: '{candidate}'")
        
        all_candidates.sort(key=lambda x: (-x[1], len(x[0])))  # Sort by score (high to low), then by length (short to long)
        title = all_candidates[0][0]
        if verbose:
            print(f"Selected title: '{title}'")
        return title
    
    # Last resort: use PDF filename as the title but clean it up
    try:
        filename = os.path.basename(pdf_path)
        filename_without_ext = os.path.splitext(filename)[0]
        if filename_without_ext:
            # Remove patterns like timestamps, UUIDs, or paper IDs
            cleaned = re.sub(r'\b([0-9a-f]{8,}|[A-Z0-9]+-\d+|\d{4}\.\d+v\d+)\b', '', filename_without_ext)
            # Remove special characters
            cleaned = re.sub(r'[_\-\.]+', ' ', cleaned).strip()
            if cleaned and len(cleaned) > 3:
                title = cleaned
                if verbose:
                    print(f"Using cleaned filename as title: '{title}'")
                return title
            else:
                title = filename_without_ext.replace('_', ' ').strip()
                if verbose:
                    print(f"Using filename as title: '{title}'")
                return title
    except Exception as e:
        if verbose:
            print(f"Error using filename as title: {e}")
    
    if verbose:
        print("Could not extract a title from the PDF")
    return None

def import_pdf_directory(directory_path, download_dir='papers', verbose=False):
    """Import every PDF found (recursively) under a directory.

    Args:
        directory_path: Directory to scan for ``*.pdf`` files.
        download_dir: Destination directory forwarded to import_pdf_file.
        verbose: Forwarded to import_pdf_file for detailed output.

    Returns:
        True if at least one PDF was imported successfully, False otherwise.
    """
    if not os.path.exists(directory_path):
        print(f"Error: Directory not found: {directory_path}")
        return False

    # Recursively collect all PDF files (case-insensitive extension match).
    try:
        pdf_files = [
            os.path.join(dirpath, name)
            for dirpath, _, names in os.walk(directory_path)
            for name in names
            if name.lower().endswith('.pdf')
        ]
    except Exception as e:
        print(f"Error scanning directory: {e}")
        return False

    if not pdf_files:
        print(f"No PDF files found in directory: {directory_path}")
        return False

    print(f"Found {len(pdf_files)} PDF files in {directory_path}")

    # Import each file, tallying successes and failures.
    success_count = 0
    failed_count = 0
    for position, pdf_path in enumerate(pdf_files, start=1):
        print(f"Processing file {position}/{len(pdf_files)}: {os.path.basename(pdf_path)}")
        try:
            imported = import_pdf_file(pdf_path, download_dir, verbose)
        except Exception as e:
            print(f"Error processing {pdf_path}: {e}")
            failed_count += 1
        else:
            if imported:
                success_count += 1
            else:
                failed_count += 1

    # Print summary
    print("\nImport Summary:")
    print(f"Total PDF files found: {len(pdf_files)}")
    print(f"Successfully imported: {success_count}")
    print(f"Failed to import: {failed_count}")

    return success_count > 0

def import_pdf_file(pdf_path, download_dir='papers', verbose=False):
    """Import a single PDF: extract its title, copy it into download_dir,
    and record/update the paper in paper_info.csv and paper_status.csv.

    Args:
        pdf_path: Path to the PDF file to import.
        download_dir: Directory the PDF is copied into (created if missing).
        verbose: If True, print detailed progress information.

    Returns:
        True on success; False if the file is missing, not a valid PDF,
        or the copy fails.
    """
    if not os.path.exists(pdf_path):
        print(f"Error: PDF file not found: {pdf_path}")
        return False

    # Create download directory if it doesn't exist
    Path(download_dir).mkdir(exist_ok=True)

    # Extract title from PDF; fall back to the cleaned filename.
    title = extract_title_from_pdf(pdf_path, verbose)
    if not title:
        print("Could not extract title from PDF. Using filename as title.")
        filename = os.path.basename(pdf_path)
        title = os.path.splitext(filename)[0].replace('_', ' ').strip()

    # Reject files that are not structurally valid PDFs.
    if not is_valid_pdf_file(pdf_path, verbose):
        print(f"The file is not a valid PDF: {pdf_path}")
        return False

    # Sanitize title for use as a filename (strip characters illegal on
    # common filesystems) and cap the stem at 100 characters.
    sanitized_title = re.sub(r'[\\/*?:"<>|]', "_", title)
    new_filename = os.path.join(download_dir, f"{sanitized_title[:100]}.pdf")

    # Copy the file (with metadata) into the papers directory.
    # shutil is imported at module level; the redundant local import removed.
    try:
        shutil.copy2(pdf_path, new_filename)
        print(f"Imported PDF to: {new_filename}")
    except Exception as e:
        print(f"Error copying PDF file: {e}")
        return False

    # Look for an existing entry with a very similar title so we can reuse
    # its paper ID instead of creating a duplicate record.
    paper_id = None
    paper_exists = False

    # Determine paths for paper_info.csv and paper_status.csv
    data_dir = os.path.dirname(download_dir) if download_dir != 'papers' else 'data'
    paper_info_file = os.path.join(data_dir, 'paper_info.csv')
    paper_status_file = os.path.join(data_dir, 'paper_status.csv')

    # Check paper_info.csv for a near-duplicate title.
    if os.path.exists(paper_info_file):
        with open(paper_info_file, 'r', newline='', encoding='utf-8') as f:
            reader = csv.reader(f)
            headers = next(reader)
            title_idx = headers.index('title') if 'title' in headers else 1

            for row in reader:
                if len(row) > title_idx:
                    csv_title = row[title_idx]
                    similarity = title_similarity(title, csv_title)

                    if similarity > 0.8:  # Title is very similar
                        paper_id = row[0]
                        paper_exists = True
                        if verbose:
                            print(f"Found matching paper in database with ID: {paper_id}")
                        break

    # If paper not found, generate a short random ID.
    # uuid is imported at module level; the redundant local import removed.
    if not paper_id:
        paper_id = str(uuid.uuid4())[:8]
        if verbose:
            print(f"Generated new paper ID: {paper_id}")

    # Update or add to paper_status.csv
    status_updated = False
    if paper_exists and os.path.exists(paper_status_file):
        # Update existing status
        status_updated = update_paper_status(paper_id, 'downloaded', status_file=paper_status_file)
        if status_updated and verbose:
            print(f"Updated status for paper ID: {paper_id}")

    # If not updated, add a new entry
    if not status_updated:
        if not os.path.exists(paper_status_file):
            # Create the status file (with header row) if it doesn't exist
            Path(data_dir).mkdir(exist_ok=True)
            with open(paper_status_file, 'w', newline='', encoding='utf-8') as f:
                writer = csv.writer(f)
                writer.writerow(['paper_id', 'paper_title', 'DOI', 'status'])

        # Append the new paper to the status file
        with open(paper_status_file, 'a', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerow([paper_id, title, 'Unknown', 'downloaded'])

        if verbose:
            print(f"Added new entry to status file with ID: {paper_id}")

    # If the paper doesn't exist in paper_info.csv, add it
    if not paper_exists and os.path.exists(paper_info_file):
        with open(paper_info_file, 'a', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerow([
                paper_id,
                title,
                '', # website_url
                '', # download_url
                '', # abstract
                '', # authors
                '', # journal
                '', # publication_date
                0,  # citations
                ''  # citation_link
            ])

        if verbose:
            print(f"Added new entry to paper info file with ID: {paper_id}")

    print(f"Successfully imported PDF: {title}")
    return True

def title_similarity(title1, title2):
    """Return a 0..1 word-overlap score between two titles.

    The score is |shared words| / |larger word set|, computed on
    lowercase whitespace-split tokens. Empty or missing input yields 0.
    """
    if not title1 or not title2:
        return 0

    first_words = set(title1.lower().split())
    second_words = set(title2.lower().split())

    if not first_words or not second_words:
        return 0

    shared = first_words & second_words
    return len(shared) / max(len(first_words), len(second_words))

def update_download_status(download_dir='papers', verbose=False):
    """Scan the papers directory and update status for files that exist but are marked as failed downloads.

    Args:
        download_dir: Directory containing downloaded PDF files. The status
            CSV is looked up in its parent directory, or in 'data' when the
            default 'papers' directory is used.
        verbose: If True, print per-paper matching details and, for unmatched
            papers, the closest candidate filenames.

    Side effects:
        Rewrites matching rows in paper_status.csv (via update_paper_status)
        from 'download failed' to 'downloaded'; prints a summary.
    """
    # Determine paths for paper_status.csv
    data_dir = os.path.dirname(download_dir) if download_dir != 'papers' else 'data'
    paper_status_file = os.path.join(data_dir, 'paper_status.csv')
    
    if not os.path.exists(paper_status_file):
        print("Error: paper_status.csv not found. No status to update.")
        return
    
    if not os.path.exists(download_dir):
        print(f"Error: Papers directory '{download_dir}' not found.")
        return
    
    # Get all PDF files in the papers directory (non-recursive; only the
    # top level of download_dir is scanned).
    pdf_files = []
    for file in os.listdir(download_dir):
        if file.lower().endswith('.pdf'):
            pdf_files.append(file)
    
    if not pdf_files:
        print(f"No PDF files found in {download_dir}/")
        return
    
    # Read the paper status data
    papers_to_update = []
    updated_count = 0
    
    try:
        # Load paper status data
        status_data = []
        with open(paper_status_file, 'r', newline='', encoding='utf-8') as f:
            reader = csv.reader(f)
            headers = next(reader)  # Get headers
            
            # Check column indices; fall back to conventional positions when
            # the expected header names are absent.
            paper_id_idx = headers.index('paper_id') if 'paper_id' in headers else 0
            title_idx = headers.index('paper_title') if 'paper_title' in headers else 1
            status_idx = headers.index('status') if 'status' in headers else -1
            
            if status_idx == -1:
                print("Error: 'status' column not found in paper_status.csv")
                return
            
            # Read all data (skipping short/malformed rows)
            for row in reader:
                if len(row) > max(paper_id_idx, title_idx, status_idx):
                    status_data.append(row)
        
        # Find papers marked as failed that might now be in the papers directory
        for row in status_data:
            paper_id = row[paper_id_idx]
            paper_title = row[title_idx]
            status = row[status_idx]
            
            if status.lower() == 'download failed':
                # Check if a matching PDF exists in the papers directory.
                # Titles are sanitized the same way filenames were generated
                # at download time, so the comparison is like-for-like.
                sanitized_title = re.sub(r'[\\/*?:"<>|]', "_", paper_title)
                
                # Check for potential truncated filenames - some PDFs might have been saved with truncated names
                found_match = False
                for pdf_file in pdf_files:
                    # Remove extension to compare
                    pdf_name_without_ext = os.path.splitext(pdf_file)[0]
                    
                    # Compare with title similarity to account for minor differences
                    similarity = title_similarity(sanitized_title, pdf_name_without_ext)
                    
                    if similarity > 0.7:  # High similarity threshold
                        if verbose:
                            print(f"Found matching PDF for paper: {paper_title}")
                            print(f"  Matched file: {pdf_file}")
                            print(f"  Similarity score: {similarity:.2f}")
                        
                        papers_to_update.append((paper_id, paper_title, pdf_file))
                        found_match = True
                        break
                
                if not found_match and verbose:
                    # Try to find close matches for debugging purposes
                    close_matches = [(pdf_file, title_similarity(sanitized_title, os.path.splitext(pdf_file)[0])) 
                                    for pdf_file in pdf_files]
                    close_matches.sort(key=lambda x: -x[1])  # Sort by similarity (highest first)
                    
                    if close_matches and close_matches[0][1] > 0.4:  # If we have any decent matches, show them
                        print(f"No exact match for paper: {paper_title}")
                        print(f"  Closest files (with similarity scores):")
                        for pdf_file, score in close_matches[:3]:  # Show top 3
                            print(f"  - {pdf_file} (similarity: {score:.2f})")
        
        # Update status for matched papers
        if papers_to_update:
            print(f"Found {len(papers_to_update)} papers to update:")
            for paper_id, paper_title, pdf_file in papers_to_update:
                print(f"  • {paper_title}")
                # NOTE(review): update_paper_status is called with its default
                # status file here, not paper_status_file — presumably they
                # coincide for the default layout; verify for custom dirs.
                if update_paper_status(paper_id, 'downloaded'):
                    updated_count += 1
            
            print(f"\nSuccessfully updated status for {updated_count} papers.")
        else:
            print("No papers found that need status updates.")
    
    except Exception as e:
        print(f"Error updating paper status: {e}")

@contextmanager
def timeout(seconds):
    """Context manager that raises TimeoutError if the body runs too long.

    Uses SIGALRM, so it only works on Unix and only in the main thread.

    Args:
        seconds: Whole seconds before the alarm fires (0 disables it).

    Raises:
        TimeoutError: If the managed block does not finish in time.
    """
    def signal_handler(signum, frame):
        raise TimeoutError("Timed out!")
    # Save the previous handler so other users of SIGALRM are not clobbered
    # (the original version leaked our handler after the block exited).
    old_handler = signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)  # Cancel any pending alarm.
        signal.signal(signal.SIGALRM, old_handler)  # Restore prior handler.

def try_download_acm_papers(status_file='data/paper_status.csv', info_file='data/paper_info.csv', download_dir='papers', verbose=False):
    """Placeholder for retrying failed ACM Digital Library downloads.

    Currently unimplemented: it only reports which files and directory it
    would have used, then returns None.
    """
    for message in (
        "ACM Digital Library paper download not yet implemented",
        f"Would attempt to download papers to: {download_dir}",
        f"Using status file: {status_file}",
        f"Using info file: {info_file}",
    ):
        print(message)
    return

import pandas as pd
from difflib import SequenceMatcher
from pathlib import Path

def check_redundant_papers(data_dir, verbose=False):
    """
    Check for redundant papers in the paper_info.csv file.

    Runs duplicate-ID, duplicate-title, and similar-abstract checks and
    prints the findings; returns None.

    Args:
        data_dir (str or Path): Directory containing the paper_info.csv file.
        verbose (bool): If True, print the full abstracts of similar pairs.
    """
    csv_path = Path(data_dir) / 'paper_info.csv'

    # Load the CSV file
    try:
        df = pd.read_csv(csv_path)
    except FileNotFoundError:
        print(f"[ERROR] '{csv_path}' not found in the data directory.")
        return
    except Exception as e:
        print(f"[ERROR] Failed to load CSV: {str(e)}")
        return

    print("\n=== Checking for Redundant Papers ===")

    # Run all checks
    check_duplicate_ids(df)
    check_duplicate_titles(df)
    check_similar_abstracts(df, similarity_threshold=0.85, show_abstracts=verbose)

    print("\n=== Redundancy Check Complete ===")

def check_duplicate_ids(df):
    """Print any rows whose 'id' value appears more than once in df."""
    dupes = df[df.duplicated('id', keep=False)]
    if dupes.empty:
        print("\n[✓] No duplicate IDs found.")
    else:
        print("\n[!] Duplicate IDs found:")
        print(dupes[['id', 'title']].to_string(index=False))

def check_duplicate_titles(df):
    """Print any rows whose 'title' value appears more than once in df."""
    dupes = df[df.duplicated('title', keep=False)]
    if dupes.empty:
        print("\n[✓] No duplicate titles found.")
    else:
        print("\n[!] Duplicate titles found:")
        print(dupes[['id', 'title']].to_string(index=False))

def check_similar_abstracts(df, similarity_threshold=0.85, show_abstracts=False):
    """Check for pairs of papers with near-identical abstracts.

    Compares every pair of non-empty abstracts with
    difflib.SequenceMatcher (O(n^2) pairs) and prints pairs whose ratio
    exceeds the threshold.

    Args:
        df: DataFrame with 'id', 'title' and 'abstract' columns.
        similarity_threshold: Ratio above which two abstracts are flagged.
        show_abstracts: If True, also print the full abstract texts.
    """
    similar_pairs = []
    # Materialize as plain lists so indexing is positional. The original
    # `abstracts[i]` was label-based Series indexing, which raised KeyError
    # (or silently paired wrong rows) whenever df had a non-default index,
    # e.g. after filtering rows.
    abstracts = df['abstract'].fillna('').astype(str).tolist()  # Handle NaN values
    ids = df['id'].tolist()
    titles = df['title'].tolist()

    for i in range(len(abstracts)):
        # Skip empty abstracts early — hoisted out of the inner loop.
        if not abstracts[i]:
            continue
        for j in range(i + 1, len(abstracts)):
            if not abstracts[j]:
                continue

            similarity = SequenceMatcher(None, abstracts[i], abstracts[j]).ratio()
            if similarity > similarity_threshold:
                similar_pairs.append((
                    ids[i],
                    ids[j],
                    titles[i],
                    titles[j],
                    similarity,
                    abstracts[i],
                    abstracts[j]
                ))

    if similar_pairs:
        print("\n[!] Similar abstracts found (threshold = {}):".format(similarity_threshold))
        for pair in similar_pairs:
            print(f"\nPapers {pair[0]} and {pair[1]} (similarity: {pair[4]:.2f})")
            print(f"Title 1: {pair[2]}")
            print(f"Title 2: {pair[3]}")

            if show_abstracts:
                print("\nAbstract 1:")
                print(pair[5])
                print("\nAbstract 2:")
                print(pair[6])
                print("-" * 80)
    else:
        print("\n[✓] No similar abstracts found (threshold = {}).".format(similarity_threshold))

def main():
    args = parse_args()
    
    print("Verbose mode enabled")
    print(f"PDF parsing available: {PDF_PARSING_AVAILABLE}")
    
    # Setup project directories
    project_dirs = setup_project_directories(args.project)
    data_dir = project_dirs['data_dir']
    papers_dir = project_dirs['papers_dir']
    
    # Create data directory if it doesn't exist
    Path(data_dir).mkdir(exist_ok=True)
    
    # Check for redundant papers if requested
    if args.check:
        check_redundant_papers(data_dir=data_dir, verbose=args.verbose)
        return
    
    # Update download status if requested
    if args.update:
        update_download_status(download_dir=papers_dir, verbose=args.verbose)
        return
        
    # Import directory of PDF files if requested
    if args.import_dir:
        import_pdf_directory(args.import_dir, download_dir=papers_dir, verbose=args.verbose)
        return
        
    # Import PDF file if requested
    if args.import_pdf:
        import_pdf_file(args.import_pdf, download_dir=papers_dir, verbose=args.verbose)
        return
    
    # Query download status if requested
    if args.query:
        query_download_status(
            status_file=os.path.join(data_dir, 'paper_status.csv'),
            info_file=os.path.join(data_dir, 'paper_info.csv'),
            download_dir=papers_dir,
            verbose=args.verbose
        )
        return
    
    # Check for combined ACM and retry flags
    if args.acm and args.retry:
        print("Both --acm and --retry flags detected. Attempting to retry downloading ACM papers...")
        try_download_acm_papers(
            status_file=os.path.join(data_dir, 'paper_status.csv'),
            info_file=os.path.join(data_dir, 'paper_info.csv'),
            download_dir=papers_dir,
            verbose=args.verbose
        )
        return
    
    # List papers with PDF links that failed to download
    if args.pdf_fail:
        list_pdf_fail_papers(
            status_file=os.path.join(data_dir, 'paper_status.csv'),
            info_file=os.path.join(data_dir, 'paper_info.csv'),
            verbose=args.verbose
        )
        return
    
    # List ACM papers that failed to download
    if args.acm:
        list_acm_papers(
            status_file=os.path.join(data_dir, 'paper_status.csv'),
            info_file=os.path.join(data_dir, 'paper_info.csv'),
            verbose=args.verbose
        )
        return
        
    # List failed downloads if requested
    if args.retry:
        list_failed_downloads(
            status_file=os.path.join(data_dir, 'paper_status.csv'),
            verbose=args.verbose
        )
        return
    
    # Search arXiv for failed downloads
    if args.arxiv:
        try_arxiv_for_failed_papers(
            status_file=os.path.join(data_dir, 'paper_status.csv'),
            download_dir=papers_dir,
            verbose=args.verbose
        )
        return
    
    # Validate downloaded PDFs if requested
    if args.validate_pdfs:
        validate_downloaded_pdfs(
            download_dir=papers_dir,
            cache_file=os.path.join(data_dir, 'pdf_validation_cache.json'),
            verbose=args.verbose
        )
        return
    
    # Check if we need to search papers
    if args.keyword:
        api_key = get_serp_api_key()
        
        papers = []
        
        for keyword in args.keyword:
            print(f"Searching for papers with keyword: {keyword}")
            # Add quotes for precise matching if --precise is used
            search_keyword = f'"{keyword}"' if args.precise else keyword
            results = search_google_scholar(search_keyword, api_key, args.num)
            print(f"Found {len(results)} results for '{keyword}'")
            
            for result in results:
                paper = process_search_result(result)
                papers.append(paper)
        
        # Filter redundant papers
        if args.filter:
            print("\nFiltering redundant papers...")
            papers = filter_redundant_papers(papers)
            write_filtered_papers_to_csv(papers, output_file=os.path.join(data_dir, 'paper_info_filter.csv'))
        else:
            # Remove duplicates based on title (basic filtering)
            unique_papers = {}
            for paper in papers:
                if paper['title'] not in unique_papers:
                    unique_papers[paper['title']] = paper
            
            papers = list(unique_papers.values())
            print(f"Total unique papers: {len(papers)}")
        
        # Save paper information to CSV
        paper_info_file = os.path.join(data_dir, 'paper_info.csv')
        with open(paper_info_file, 'w', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerow(['id', 'title', 'website_url', 'download_url', 'abstract', 'authors', 'journal', 'publication_date', 'citations', 'citation_link', 'author_year', 'keyword'])
            for paper in papers:
                # Get the first word from title and format it
                title_words = paper['title'].strip().split()
                # Tag the row with the first entry of title_words, made
                # filename-safe by replacing spaces with underscores.
                # NOTE(review): despite the name, title_words presumably holds
                # the search keyword(s) — confirm against the code above.
                keyword = title_words[0].replace(' ', '_') if title_words else ''

                # One CSV row per paper; optional fields default to '' (or 0
                # for the citation count) when absent from the paper dict.
                writer.writerow([
                    paper['id'], 
                    paper['title'], 
                    paper['website_url'], 
                    paper['download_url'], 
                    paper['abstract'],
                    paper.get('authors', ''),
                    paper.get('journal', ''),
                    paper.get('publication_date', ''),
                    paper.get('citations', 0),
                    paper.get('citation_link', ''),
                    paper.get('author_year', ''),
                    keyword
                ])
        
        print(f"Paper information saved to {paper_info_file}")
    else:
        # If no keyword is provided, --doi / --filter / --download all fall
        # back to the previously saved paper_info.csv; any other combination
        # is a usage error.
        if args.doi:
            papers = read_paper_info_csv(filename=os.path.join(data_dir, 'paper_info.csv'))
        elif args.filter:
            # If filter is requested without keyword, read from existing CSV
            papers = read_paper_info_csv(filename=os.path.join(data_dir, 'paper_info.csv'))
            if papers:
                print("\nFiltering redundant papers...")
                filtered_papers = filter_redundant_papers(papers)
                write_filtered_papers_to_csv(filtered_papers, output_file=os.path.join(data_dir, 'paper_info_filter.csv'))
                # Later steps in this run (e.g. --download) operate on the
                # filtered set, not the full CSV contents.
                papers = filtered_papers
            else:
                print("No papers to filter. Exiting.")
                sys.exit(1)
        elif args.download:
            papers = read_paper_info_csv(filename=os.path.join(data_dir, 'paper_info.csv'))
        else:
            print("Error: No keyword provided. Use --keyword to search for papers, --doi to extract DOIs, --filter to filter existing papers, --download to download papers, --validate-pdfs to check downloaded files, --retry to list failed downloads, --arxiv to find papers on arXiv, --pdf-fail to list failed downloads with PDF links, --acm to list failed ACM papers, or --import to import a local PDF.")
            sys.exit(1)
    
    # Extract DOIs if requested
    if args.doi:
        extract_and_write_dois(papers, output_file=os.path.join(data_dir, 'paper_doi.csv'), verbose=args.verbose)
    
    # Try to download papers only if --download flag is provided
    if args.download:
        print("Download flag specified. Starting download process...")
        # Accumulates one status dict per paper (including skipped ones) and
        # is written back to paper_status.csv at the end of the run.
        status_list = []
        
        # Read existing status if available. paper_status.csv doubles as a
        # resume file: any paper_id already present is skipped below,
        # regardless of whether its recorded status was a success.
        existing_status = {}
        status_file = os.path.join(data_dir, 'paper_status.csv')
        if os.path.exists(status_file):
            print("Reading existing download status...")
            with open(status_file, 'r', newline='', encoding='utf-8') as f:
                reader = csv.DictReader(f)
                for row in reader:
                    existing_status[row['paper_id']] = row['status']
            print(f"Found {len(existing_status)} papers with existing status")
        
        for i, paper in enumerate(papers):
            # Skip if paper has any existing status (downloaded, failed, or inaccessible)
            # NOTE(review): failed downloads are therefore never retried
            # automatically — the --retry/--arxiv options exist for that.
            if paper['id'] in existing_status:
                status = existing_status[paper['id']]
                if args.verbose:
                    print(f"Skipping paper {i+1}/{len(papers)}: {paper['title'][:50]}... (previous status: {status})")
                # Carry the old status forward so the rewritten CSV stays complete.
                status_list.append({
                    'paper_id': paper['id'],
                    'paper_title': paper['title'],
                    'DOI': paper.get('doi') or 'Unknown',
                    'status': status
                })
                continue
                
            print(f"Downloading paper {i+1}/{len(papers)}: {paper['title'][:50]}...")
            
            # Skip DOI extraction, only use if already available
            status = try_download_paper(paper, download_dir=papers_dir, verbose=args.verbose)
            
            # If download failed and paper already has DOI, try Sci-Hub
            # as a fallback source before recording the final status.
            if status == "download failed" and paper.get('doi'):
                print(f"Attempting to download from Sci-Hub for: {paper['title']}")
                status = try_download_from_scihub(paper, download_dir=papers_dir, verbose=args.verbose)
            
            status_list.append({
                'paper_id': paper['id'],
                'paper_title': paper['title'],
                'DOI': paper.get('doi') or 'Unknown',
                'status': status
            })
        
        # Save status information to CSV. The file is rewritten wholesale:
        # carried-over entries plus this run's new results.
        status_file = os.path.join(data_dir, 'paper_status.csv')
        with open(status_file, 'w', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerow(['paper_id', 'paper_title', 'DOI', 'status'])
            for status_item in status_list:
                writer.writerow([status_item['paper_id'], status_item['paper_title'], status_item['DOI'], status_item['status']])
        
        print(f"Download status saved to {status_file}")
        
        # Validate downloaded PDFs after download; results are cached so
        # repeat runs don't re-parse every file.
        print("\nValidating downloaded PDF files...")
        validate_downloaded_pdfs(
            download_dir=papers_dir,
            cache_file=os.path.join(data_dir, 'pdf_validation_cache.json'),
            verbose=args.verbose
        )
        
        # Print summary of per-paper outcomes (skipped papers are counted
        # under whatever status they carried over).
        statuses = [item['status'] for item in status_list]
        downloaded = statuses.count('downloaded')
        failed = statuses.count('download failed')
        inaccessible = statuses.count('inaccessible')
        
        print("\nDownload Summary:")
        print(f"Total papers: {len(papers)}")
        print(f"Downloaded: {downloaded}")
        print(f"Failed: {failed}")
        print(f"Inaccessible: {inaccessible}")
        
        # If there are failed downloads, suggest using --retry and --arxiv
        if failed > 0:
            print(f"\nThere were {failed} papers that failed to download.")
            print("Use the --retry option to get a list of these papers for manual download.")
            print("Use the --arxiv option to search arxiv.org for papers that failed to download.")
            print("Use the --pdf-fail option to list papers with PDF links that failed to download.")
            print("Use the --acm option to list papers from dl.acm.org that failed to download.")
            
    elif not args.doi and not args.filter:
        # No actionable flag at all: print usage hints and exit normally.
        print("No action specified. Use --download to download papers, --doi to extract DOIs, --filter to filter papers, --validate-pdfs to check downloaded files, --retry to list failed downloads, --arxiv to find papers on arXiv, --pdf-fail to list failed downloads with PDF links, --acm to list failed ACM papers, or --import to import a local PDF.")
        print("Use --download flag to enable paper downloads.")

# Standard entry-point guard: run main() only when this file is executed
# directly, not when it is imported as a module.
if __name__ == "__main__":
    main() 