#!/usr/bin/env python3
"""
Utilities for checking and processing download status.
"""

import os
import csv
from pathlib import Path

def update_download_status(download_dir='papers', verbose=False):
    """Scan the papers directory and update the status of papers that are
    marked as 'download failed' but whose PDF actually exists on disk.

    A failed paper is matched to a PDF by fuzzy title similarity (threshold
    0.7) between the sanitized paper title and the PDF filename (extension
    stripped).

    Args:
        download_dir: Directory holding the downloaded PDF files.
        verbose: If True, print matching details and near-miss candidates.
    """
    # paper_status.csv lives in the parent of the download dir, or in
    # 'data' for the default 'papers' layout.
    data_dir = os.path.dirname(download_dir) if download_dir != 'papers' else 'data'
    paper_status_file = os.path.join(data_dir, 'paper_status.csv')

    if not os.path.exists(paper_status_file):
        print("Error: paper_status.csv not found. No status to update.")
        return

    if not os.path.exists(download_dir):
        print(f"Error: Papers directory '{download_dir}' not found.")
        return

    # Collect all PDF filenames in the papers directory.
    pdf_files = [name for name in os.listdir(download_dir)
                 if name.lower().endswith('.pdf')]

    if not pdf_files:
        print(f"No PDF files found in {download_dir}/")
        return

    papers_to_update = []
    updated_count = 0

    try:
        # Load the status CSV into memory.
        status_data = []
        with open(paper_status_file, 'r', newline='', encoding='utf-8') as f:
            reader = csv.reader(f)
            headers = next(reader)  # Get headers

            # Resolve column indices, falling back to conventional positions.
            paper_id_idx = headers.index('paper_id') if 'paper_id' in headers else 0
            title_idx = headers.index('paper_title') if 'paper_title' in headers else 1
            status_idx = headers.index('status') if 'status' in headers else -1

            if status_idx == -1:
                print("Error: 'status' column not found in paper_status.csv")
                return

            # Keep only rows long enough to hold every needed column.
            for row in reader:
                if len(row) > max(paper_id_idx, title_idx, status_idx):
                    status_data.append(row)

        # Hoisted out of the per-row loop: these were previously re-imported
        # on every failed paper.
        import re
        from .file_utils import title_similarity
        invalid_chars = re.compile(r'[\\/*?:"<>|]')

        # Find papers marked as failed whose PDF is actually on disk.
        for row in status_data:
            paper_id = row[paper_id_idx]
            paper_title = row[title_idx]
            status = row[status_idx]

            if status.lower() != 'download failed':
                continue

            # Mirror the downloader's filename sanitization.
            sanitized_title = invalid_chars.sub("_", paper_title)

            found_match = False
            for pdf_file in pdf_files:
                # Remove extension to compare
                pdf_name_without_ext = os.path.splitext(pdf_file)[0]

                # Fuzzy match to tolerate truncation / minor differences.
                similarity = title_similarity(sanitized_title, pdf_name_without_ext)

                if similarity > 0.7:  # High similarity threshold
                    if verbose:
                        print(f"Found matching PDF for paper: {paper_title}")
                        print(f"  Matched file: {pdf_file}")
                        print(f"  Similarity score: {similarity:.2f}")

                    papers_to_update.append((paper_id, paper_title, pdf_file))
                    found_match = True
                    break

            if not found_match and verbose:
                # Show the closest candidates to help debugging.
                close_matches = [(pdf_file, title_similarity(sanitized_title, os.path.splitext(pdf_file)[0]))
                                for pdf_file in pdf_files]
                close_matches.sort(key=lambda x: -x[1])  # Sort by similarity (highest first)

                if close_matches and close_matches[0][1] > 0.4:  # If we have any decent matches, show them
                    print(f"No exact match for paper: {paper_title}")
                    print(f"  Closest files (with similarity scores):")
                    for pdf_file, score in close_matches[:3]:  # Show top 3
                        print(f"  - {pdf_file} (similarity: {score:.2f})")

        # Persist the status change for every matched paper.
        if papers_to_update:
            from .data_processor import update_paper_status

            print(f"Found {len(papers_to_update)} papers to update:")
            for paper_id, paper_title, pdf_file in papers_to_update:
                print(f"  • {paper_title}")
                if update_paper_status(paper_id, 'downloaded', status_file=paper_status_file):
                    updated_count += 1

            print(f"\nSuccessfully updated status for {updated_count} papers.")
        else:
            print("No papers found that need status updates.")

    except Exception as e:
        # Boundary catch: report the problem to the CLI user instead of crashing.
        print(f"Error updating paper status: {e}")

def query_download_status(status_file='data/paper_status.csv', info_file='data/paper_info.csv', download_dir='papers', verbose=False):
    """Summarize download statuses and list failed downloads grouped by
    source website, saving the classification to a CSV next to *status_file*.

    Args:
        status_file: CSV with paper_id/paper_title/status/DOI columns.
        info_file: CSV with id/title/website_url/download_url columns.
        download_dir: Directory holding downloaded PDFs (created if absent).
        verbose: Unused; kept for interface symmetry with sibling commands.
    """
    import re

    if not os.path.exists(status_file):
        print(f"Status file {status_file} not found. No download information available.")
        return

    if not os.path.exists(info_file):
        print(f"Paper info file {info_file} not found. Cannot classify by source website.")
        return

    # Create download directory if it doesn't exist (to avoid listdir errors).
    # parents=True so a nested path like 'out/papers' also works.
    Path(download_dir).mkdir(parents=True, exist_ok=True)

    # Read the status file keyed by paper id.
    download_statuses = {}
    with open(status_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            paper_id = row.get('paper_id', '')
            download_statuses[paper_id] = {
                'title': row.get('paper_title', ''),
                'status': row.get('status', ''),
                'doi': row.get('DOI', '')
            }

    # Read the paper info file to get URLs, falling back to conventional
    # column positions when headers are missing.
    paper_info = {}
    with open(info_file, 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)
        id_idx = header.index('id') if 'id' in header else 0
        title_idx = header.index('title') if 'title' in header else 1
        website_url_idx = header.index('website_url') if 'website_url' in header else 2
        download_url_idx = header.index('download_url') if 'download_url' in header else 3

        for row in reader:
            # Skip rows too short to hold every needed column.
            if len(row) > max(id_idx, website_url_idx, download_url_idx, title_idx):
                paper_info[row[id_idx]] = {
                    'title': row[title_idx],
                    'website_url': row[website_url_idx],
                    'download_url': row[download_url_idx]
                }

    # Downloaded PDFs, lowercased once for case-insensitive matching.
    pdf_files = [f.lower() for f in os.listdir(download_dir) if f.lower().endswith('.pdf')]

    # Tally papers per status value.
    status_counts = {}
    for info in download_statuses.values():
        status_counts[info['status']] = status_counts.get(info['status'], 0) + 1

    # Print summary
    print("\nDownload Status Summary:")
    print(f"Total papers tracked: {len(download_statuses)}")
    for status, count in status_counts.items():
        print(f"{status}: {count} papers")

    # Hoisted out of the loop: previously `import re` ran per failed paper.
    invalid_chars = re.compile(r'[\\/*?:"<>|]')

    # Collect papers that failed to download and are not already on disk.
    failed_papers = []
    for paper_id, status_info in download_statuses.items():
        if status_info['status'] != 'download failed':
            continue

        title = status_info['title']
        # Mirror the downloader's filename sanitization/truncation so we can
        # detect PDFs that exist despite a 'failed' status. Both sides are
        # already lowercased, so no further .lower() calls are needed.
        sanitized_title = invalid_chars.sub("_", title)[:100].lower() + ".pdf"

        if any(sanitized_title in pdf_file for pdf_file in pdf_files):
            # File exists despite 'failed' status; report and skip (we don't
            # write the correction back to the CSV here).
            print(f"Note: Paper '{title[:50]}...' marked as failed but PDF exists - skipping")
            continue

        paper = {
            'id': paper_id,
            'title': title,
            'doi': status_info['doi'],
            'website_url': '',
            'download_url': ''
        }

        # Attach URL information when available.
        if paper_id in paper_info:
            paper['website_url'] = paper_info[paper_id]['website_url']
            paper['download_url'] = paper_info[paper_id]['download_url']

        failed_papers.append(paper)

    if not failed_papers:
        print("\nNo failed downloads found!")
        return

    # Known publisher domains used for classification.
    sources = {
        'ieee': {'name': 'IEEE Explore', 'domain': 'ieeexplore.ieee.org', 'papers': []},
        'springer': {'name': 'Springer', 'domain': 'link.springer.com', 'papers': []},
        'sciencedirect': {'name': 'ScienceDirect', 'domain': 'sciencedirect.com', 'papers': []},
        'acm': {'name': 'ACM Digital Library', 'domain': 'dl.acm.org', 'papers': []},
        'researchgate': {'name': 'ResearchGate', 'domain': 'researchgate.net', 'papers': []},
        'arxiv': {'name': 'arXiv', 'domain': 'arxiv.org', 'papers': []},
        'other': {'name': 'Other Sources', 'domain': '', 'papers': []}
    }

    # Classify each failed paper by URL domain (website_url preferred).
    for paper in failed_papers:
        url = paper['website_url'] or paper['download_url']

        if not url:
            sources['other']['papers'].append(paper)
            continue

        for source_key, source_info in sources.items():
            if source_key != 'other' and source_info['domain'] in url:
                source_info['papers'].append(paper)
                break
        else:
            # No known domain matched.
            sources['other']['papers'].append(paper)

    # Print results grouped by source.
    total_failed = len(failed_papers)
    print(f"\nPapers failed to download ({total_failed}) by source:")

    for source_key, source_info in sources.items():
        papers = source_info['papers']
        if not papers:
            continue
        print(f"\n@{source_info['domain'] if source_info['domain'] else 'unknown'} - {source_info['name']} ({len(papers)} papers):")
        print(f"{'Title':<80} {'URL'}")
        print(f"{'-' * 80} {'-' * 50}")

        for paper in papers:
            title = paper['title']
            if len(title) > 77:
                title = title[:77] + "..."

            # Prefer the landing-page URL; fall back to the direct link.
            url = paper['website_url'] if paper['website_url'] else paper['download_url']

            print(f"{title:<80} {url}")

    # Save the classification to CSV next to the status file.
    output_file = os.path.join(os.path.dirname(status_file), 'failed_downloads_by_source.csv')
    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['source', 'paper_id', 'title', 'website_url', 'download_url', 'doi'])

        for source_info in sources.values():
            for paper in source_info['papers']:
                writer.writerow([
                    source_info['domain'] if source_info['domain'] else 'unknown',
                    paper['id'],
                    paper['title'],
                    paper['website_url'],
                    paper['download_url'],
                    paper['doi']
                ])

    print(f"\nDetailed classification saved to {output_file}")
    print("\nNote: To update status of papers that are marked as failed but actually downloaded,")
    print("run the script with --validate-pdfs option to refresh the status information.")

def list_pdf_fail_papers(status_file='data/paper_status.csv', info_file='data/paper_info.csv', verbose=False):
    """List papers that have a direct PDF download link but whose status is
    'download failed', and save the list to pdf_fail_papers.csv.

    Args:
        status_file: CSV with paper_id/status columns.
        info_file: CSV with id/title/download_url columns.
        verbose: Unused; kept for interface symmetry with sibling commands.
    """
    # Check if required files exist
    if not os.path.exists(status_file) or not os.path.exists(info_file):
        print(f"Required files not found: {status_file} and/or {info_file}")
        return

    # Map paper id -> status.
    status_dict = {}
    with open(status_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            paper_id = row.get('paper_id', '')
            status = row.get('status', '')
            if paper_id and status:
                status_dict[paper_id] = status

    # Read paper info and collect failed papers that do have a PDF link.
    papers_with_pdf_links = []
    with open(info_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.reader(f)
        headers = next(reader)
        # Resolve column positions from headers (the id was previously
        # hard-coded as row[0], which broke if the id column moved).
        id_idx = headers.index('id') if 'id' in headers else 0
        download_url_idx = headers.index('download_url') if 'download_url' in headers else 3
        title_idx = headers.index('title') if 'title' in headers else 1

        for row in reader:
            # Skip rows too short to hold every needed column.
            if len(row) > max(id_idx, download_url_idx, title_idx):
                paper_id = row[id_idx]
                title = row[title_idx]
                download_url = row[download_url_idx]

                # Keep papers that have a PDF link but failed to download.
                if download_url and status_dict.get(paper_id) == 'download failed':
                    papers_with_pdf_links.append({
                        'id': paper_id,
                        'title': title,
                        'download_url': download_url
                    })

    if not papers_with_pdf_links:
        print("No papers found with PDF links that failed to download.")
        return

    # Print a fixed-width table of the results.
    print(f"\nFound {len(papers_with_pdf_links)} papers with PDF links that failed to download:\n")
    print(f"{'Title':<80} {'Download URL'}")
    print(f"{'-' * 80} {'-' * 50}")

    for paper in papers_with_pdf_links:
        title = paper['title']
        url = paper['download_url']

        # Truncate long titles only
        if len(title) > 77:
            title = title[:77] + "..."

        print(f"{title:<80} {url}")

    # Save to CSV next to the status file.
    output_file = os.path.join(os.path.dirname(status_file), 'pdf_fail_papers.csv')
    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['paper_id', 'title', 'download_url'])
        for paper in papers_with_pdf_links:
            writer.writerow([paper['id'], paper['title'], paper['download_url']])

    print(f"\nList saved to {output_file}")

def list_acm_papers(status_file='data/paper_status.csv', info_file='data/paper_info.csv', verbose=False):
    """List papers hosted on dl.acm.org whose status is 'download failed',
    and save the list to acm_fail_papers.csv.

    Args:
        status_file: CSV with paper_id/status columns.
        info_file: CSV with id/title/website_url columns.
        verbose: Unused; kept for interface symmetry with sibling commands.
    """
    # Check if required files exist
    if not os.path.exists(status_file) or not os.path.exists(info_file):
        print(f"Required files not found: {status_file} and/or {info_file}")
        return

    # Map paper id -> status.
    status_dict = {}
    with open(status_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            paper_id = row.get('paper_id', '')
            status = row.get('status', '')
            if paper_id and status:
                status_dict[paper_id] = status

    # Read paper info and collect failed papers hosted on dl.acm.org.
    acm_papers = []
    with open(info_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.reader(f)
        headers = next(reader)
        # Resolve column positions from headers (the id was previously
        # hard-coded as row[0], which broke if the id column moved).
        id_idx = headers.index('id') if 'id' in headers else 0
        website_url_idx = headers.index('website_url') if 'website_url' in headers else 2
        title_idx = headers.index('title') if 'title' in headers else 1

        for row in reader:
            # Skip rows too short to hold every needed column.
            if len(row) > max(id_idx, website_url_idx, title_idx):
                paper_id = row[id_idx]
                title = row[title_idx]
                website_url = row[website_url_idx]

                # Keep papers that are from ACM and failed to download.
                if 'dl.acm.org' in website_url and status_dict.get(paper_id) == 'download failed':
                    acm_papers.append({
                        'id': paper_id,
                        'title': title,
                        'website_url': website_url
                    })

    if not acm_papers:
        print("No papers found from dl.acm.org that failed to download.")
        return

    # Print a fixed-width table of the results.
    print(f"\nFound {len(acm_papers)} papers from dl.acm.org that failed to download:\n")
    print(f"{'Title':<80} {'ACM URL'}")
    print(f"{'-' * 80} {'-' * 50}")

    for paper in acm_papers:
        title = paper['title']
        url = paper['website_url']

        # Truncate long titles only
        if len(title) > 77:
            title = title[:77] + "..."

        print(f"{title:<80} {url}")

    # Save to CSV next to the status file.
    output_file = os.path.join(os.path.dirname(status_file), 'acm_fail_papers.csv')
    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['paper_id', 'title', 'website_url'])
        for paper in acm_papers:
            writer.writerow([paper['id'], paper['title'], paper['website_url']])

    print(f"\nList saved to {output_file}")