#!/usr/bin/env python3
"""
Main entry point for the SERP API search script. Orchestrates all the tools.
"""

import os
import sys
import signal
from contextlib import contextmanager

# Import modules
from tools.cli import parse_args, get_serp_api_key, setup_project_directories
from tools.api import search_google_scholar, process_search_result
from tools.downloader import try_download_paper, try_download_from_scihub, try_download_acm_papers
from tools.data_processor import (
    read_paper_info_csv, extract_and_write_dois, filter_redundant_papers, 
    write_filtered_papers_to_csv, list_failed_downloads, import_pdf_directory, import_pdf_file
)
from tools.file_utils import validate_downloaded_pdfs
from tools.status_utils import (
    update_download_status, query_download_status, 
    list_pdf_fail_papers, list_acm_papers
)
from tools.web_utils import search_arxiv_by_title
from tools.doi_extractor import try_multiple_doi_extraction_methods
from tools.check_redundant_papers import check_redundant_papers

@contextmanager
def timeout(seconds):
    """Raise ``TimeoutError`` if the wrapped block runs longer than *seconds*.

    Implemented with SIGALRM, so it only works on Unix and only in the main
    thread. ``seconds`` may be an int or a float (fractional timeouts are
    supported via ``signal.setitimer``). The previously installed SIGALRM
    handler is restored when the block exits, whether or not it timed out.

    Args:
        seconds: Maximum wall-clock time allowed for the ``with`` body.

    Raises:
        TimeoutError: If the body does not finish within *seconds*.
    """
    def _on_alarm(signum, frame):
        raise TimeoutError("Timed out!")

    # signal.signal returns the handler that was in place before ours.
    old_handler = signal.signal(signal.SIGALRM, _on_alarm)
    # setitimer (unlike signal.alarm) accepts fractional seconds.
    signal.setitimer(signal.ITIMER_REAL, seconds)
    try:
        yield
    finally:
        # Cancel any pending alarm, then put the original handler back so we
        # don't leak our handler into unrelated code.
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old_handler)

def _pdf_parsing_available():
    """Return True if the optional PDF-parsing libraries can be imported."""
    try:
        import PyPDF2  # noqa: F401
        from pdfminer.high_level import extract_text  # noqa: F401
        return True
    except ImportError:
        return False


def _run_standalone_command(args, data_dir, papers_dir):
    """Handle the self-contained maintenance/query flags.

    Checks the flags in the same priority order as the original dispatch
    (--check, --update, --import-dir, --import-pdf, --query, --acm+--retry,
    --pdf-fail, --acm, --retry, --arxiv, --validate-pdfs).

    Returns:
        True if a command ran (the caller should return immediately),
        False if no standalone flag was set.
    """
    status_file = os.path.join(data_dir, 'paper_status.csv')
    info_file = os.path.join(data_dir, 'paper_info.csv')

    if args.check:
        check_redundant_papers(data_dir=data_dir, verbose=args.verbose)
    elif args.update:
        update_download_status(download_dir=papers_dir, verbose=args.verbose)
    elif args.import_dir:
        import_pdf_directory(args.import_dir, download_dir=papers_dir, verbose=args.verbose)
    elif args.import_pdf:
        import_pdf_file(args.import_pdf, download_dir=papers_dir, verbose=args.verbose)
    elif args.query:
        query_download_status(
            status_file=status_file,
            info_file=info_file,
            download_dir=papers_dir,
            verbose=args.verbose
        )
    elif args.acm and args.retry:
        # The combined flags mean "retry the ACM downloads", not "list them".
        print("Both --acm and --retry flags detected. Attempting to retry downloading ACM papers...")
        try_download_acm_papers(
            status_file=status_file,
            info_file=info_file,
            download_dir=papers_dir,
            verbose=args.verbose
        )
    elif args.pdf_fail:
        list_pdf_fail_papers(
            status_file=status_file,
            info_file=info_file,
            verbose=args.verbose
        )
    elif args.acm:
        list_acm_papers(
            status_file=status_file,
            info_file=info_file,
            verbose=args.verbose
        )
    elif args.retry:
        list_failed_downloads(
            status_file=status_file,
            verbose=args.verbose
        )
    elif args.arxiv:
        # Imported lazily, mirroring the original code path.
        from tools.data_processor import try_arxiv_for_failed_papers
        try_arxiv_for_failed_papers(
            status_file=status_file,
            download_dir=papers_dir,
            verbose=args.verbose
        )
    elif args.validate_pdfs:
        validate_downloaded_pdfs(
            download_dir=papers_dir,
            cache_file=os.path.join(data_dir, 'pdf_validation_cache.json'),
            verbose=args.verbose
        )
    else:
        return False
    return True


def _write_paper_info_csv(papers, paper_info_file):
    """Write the collected paper metadata to ``paper_info.csv``.

    The per-row ``keyword`` column is derived from the first word of the
    paper title (empty for a blank title).
    """
    import csv  # local import: keeps the block self-contained

    with open(paper_info_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['id', 'title', 'website_url', 'download_url', 'abstract', 'authors', 'journal', 'publication_date', 'citations', 'citation_link', 'author_year', 'keyword'])
        for paper in papers:
            # Get the first word from title and format it.
            title_words = paper['title'].strip().split()
            keyword = title_words[0].replace(' ', '_') if title_words else ''

            writer.writerow([
                paper['id'],
                paper['title'],
                paper['website_url'],
                paper['download_url'],
                paper['abstract'],
                paper.get('authors', ''),
                paper.get('journal', ''),
                paper.get('publication_date', ''),
                paper.get('citations', 0),
                paper.get('citation_link', ''),
                paper.get('author_year', ''),
                keyword
            ])

    print(f"Paper information saved to {paper_info_file}")


def _search_papers(args, data_dir):
    """Search Google Scholar for each keyword and persist the results.

    Applies ``--precise`` quoting, ``--filter`` redundancy filtering (or a
    basic first-seen title dedup otherwise), writes ``paper_info.csv``, and
    returns the resulting list of paper dicts.
    """
    api_key = get_serp_api_key()

    papers = []
    for keyword in args.keyword:
        print(f"Searching for papers with keyword: {keyword}")
        # Add quotes for precise matching if --precise is used.
        search_keyword = f'"{keyword}"' if args.precise else keyword
        results = search_google_scholar(search_keyword, api_key, args.num)
        print(f"Found {len(results)} results for '{keyword}'")
        for result in results:
            papers.append(process_search_result(result))

    if args.filter:
        print("\nFiltering redundant papers...")
        papers = filter_redundant_papers(papers)
        write_filtered_papers_to_csv(papers, output_file=os.path.join(data_dir, 'paper_info_filter.csv'))
    else:
        # Basic dedup: keep the first paper seen for each title.
        unique_papers = {}
        for paper in papers:
            unique_papers.setdefault(paper['title'], paper)
        papers = list(unique_papers.values())
        print(f"Total unique papers: {len(papers)}")

    _write_paper_info_csv(papers, os.path.join(data_dir, 'paper_info.csv'))
    return papers


def _load_existing_papers(args, data_dir):
    """Load papers from an existing ``paper_info.csv`` when no keyword is given.

    Branch order matches the original: --doi reads as-is, --filter reads and
    filters (exiting with status 1 if the CSV is empty), --download reads
    as-is; otherwise an error is printed and the process exits with status 1.
    """
    info_file = os.path.join(data_dir, 'paper_info.csv')

    if args.doi:
        return read_paper_info_csv(filename=info_file)

    if args.filter:
        papers = read_paper_info_csv(filename=info_file)
        if papers:
            print("\nFiltering redundant papers...")
            filtered_papers = filter_redundant_papers(papers)
            write_filtered_papers_to_csv(filtered_papers, output_file=os.path.join(data_dir, 'paper_info_filter.csv'))
            return filtered_papers
        print("No papers to filter. Exiting.")
        sys.exit(1)

    if args.download:
        return read_paper_info_csv(filename=info_file)

    print("Error: No keyword provided. Use --keyword to search for papers, --doi to extract DOIs, --filter to filter existing papers, --download to download papers, --validate-pdfs to check downloaded files, --retry to list failed downloads, --arxiv to find papers on arXiv, --pdf-fail to list failed downloads with PDF links, --acm to list failed ACM papers, or --import to import a local PDF.")
    sys.exit(1)


def _download_papers(args, papers, data_dir, papers_dir):
    """Download every paper, record statuses, validate PDFs, print a summary.

    Papers that already have any status in ``paper_status.csv`` (downloaded,
    failed, or inaccessible) are skipped but re-recorded so the rewritten
    status file stays complete. Failed downloads with a known DOI get a
    Sci-Hub fallback attempt.
    """
    import csv  # local import: fixes NameError when --download runs without --keyword

    print("Download flag specified. Starting download process...")
    status_file = os.path.join(data_dir, 'paper_status.csv')

    # Read existing status if available so already-processed papers are skipped.
    existing_status = {}
    if os.path.exists(status_file):
        print("Reading existing download status...")
        with open(status_file, 'r', newline='', encoding='utf-8') as f:
            for row in csv.DictReader(f):
                existing_status[row['paper_id']] = row['status']
        print(f"Found {len(existing_status)} papers with existing status")

    status_list = []
    for i, paper in enumerate(papers):
        if paper['id'] in existing_status:
            status = existing_status[paper['id']]
            if args.verbose:
                print(f"Skipping paper {i+1}/{len(papers)}: {paper['title'][:50]}... (previous status: {status})")
        else:
            print(f"Downloading paper {i+1}/{len(papers)}: {paper['title'][:50]}...")
            # DOI extraction is skipped here; a DOI is only used if already present.
            status = try_download_paper(paper, download_dir=papers_dir, verbose=args.verbose)
            # If the direct download failed and the paper has a DOI, try Sci-Hub.
            if status == "download failed" and paper.get('doi'):
                print(f"Attempting to download from Sci-Hub for: {paper['title']}")
                status = try_download_from_scihub(paper, download_dir=papers_dir, verbose=args.verbose)

        status_list.append({
            'paper_id': paper['id'],
            'paper_title': paper['title'],
            'DOI': paper.get('doi') or 'Unknown',
            'status': status
        })

    # Rewrite the status CSV with the complete, up-to-date list.
    with open(status_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['paper_id', 'paper_title', 'DOI', 'status'])
        for status_item in status_list:
            writer.writerow([status_item['paper_id'], status_item['paper_title'], status_item['DOI'], status_item['status']])

    print(f"Download status saved to {status_file}")

    # Validate downloaded PDFs after download.
    print("\nValidating downloaded PDF files...")
    validate_downloaded_pdfs(
        download_dir=papers_dir,
        cache_file=os.path.join(data_dir, 'pdf_validation_cache.json'),
        verbose=args.verbose
    )

    # Print summary counts for each terminal status.
    statuses = [item['status'] for item in status_list]
    downloaded = statuses.count('downloaded')
    failed = statuses.count('download failed')
    inaccessible = statuses.count('inaccessible')

    print("\nDownload Summary:")
    print(f"Total papers: {len(papers)}")
    print(f"Downloaded: {downloaded}")
    print(f"Failed: {failed}")
    print(f"Inaccessible: {inaccessible}")

    # If there are failed downloads, suggest the follow-up options.
    if failed > 0:
        print(f"\nThere were {failed} papers that failed to download.")
        print("Use the --retry option to get a list of these papers for manual download.")
        print("Use the --arxiv option to search arxiv.org for papers that failed to download.")
        print("Use the --pdf-fail option to list papers with PDF links that failed to download.")
        print("Use the --acm option to list papers from dl.acm.org that failed to download.")


def main():
    """Main function that orchestrates the search and download process.

    Flow: parse CLI args, set up project directories, run any standalone
    maintenance/query command (and return), otherwise collect papers (from a
    keyword search or an existing CSV), then optionally extract DOIs and/or
    download the papers.
    """
    args = parse_args()

    if args.verbose:
        print("Verbose mode enabled")
        # Check whether the optional PDF-parsing stack is importable.
        print(f"PDF parsing available: {_pdf_parsing_available()}")

    # Setup project directories.
    project_dirs = setup_project_directories(args.project)
    data_dir = project_dirs['data_dir']
    papers_dir = project_dirs['papers_dir']

    # Create data directory if it doesn't exist.
    os.makedirs(data_dir, exist_ok=True)

    # Standalone commands handle everything themselves and end the run.
    if _run_standalone_command(args, data_dir, papers_dir):
        return

    if args.keyword:
        papers = _search_papers(args, data_dir)
    else:
        papers = _load_existing_papers(args, data_dir)

    # Extract DOIs if requested.
    if args.doi:
        extract_and_write_dois(papers, output_file=os.path.join(data_dir, 'paper_doi.csv'), verbose=args.verbose)

    # Try to download papers only if the --download flag is provided.
    if args.download:
        _download_papers(args, papers, data_dir, papers_dir)
    elif not args.doi and not args.filter:
        print("No action specified. Use --download to download papers, --doi to extract DOIs, --filter to filter papers, --validate-pdfs to check downloaded files, --retry to list failed downloads, --arxiv to find papers on arXiv, --pdf-fail to list failed downloads with PDF links, --acm to list failed ACM papers, or --import to import a local PDF.")
        print("Use --download flag to enable paper downloads.")

# Run the CLI entry point only when executed as a script, not when imported.
if __name__ == "__main__":
    main()