#!/usr/bin/env python3
"""
Data processing utilities for papers.
"""

import os
import csv
import uuid
from pathlib import Path

from .file_utils import title_similarity, extract_title_from_pdf
from .doi_extractor import try_multiple_doi_extraction_methods

def read_paper_info_csv(filename='data/paper_info.csv'):
    """Read paper information from a CSV file.

    Args:
        filename: Path to the CSV file. Expected columns (in order):
            id, title, website_url, download_url, abstract, authors,
            journal, publication_date, citations, citation_link, author_year.

    Returns:
        A list of paper dicts. 'doi' is always initialized to None so it can
        be filled in later by the DOI extraction step. Returns an empty list
        if the file is missing or unreadable.
    """
    papers = []
    try:
        with open(filename, 'r', encoding='utf-8') as f:
            # Read the header first to locate the URL columns, falling back
            # to the conventional positions (2 and 3) if they are absent.
            header = f.readline().strip().split(',')
            website_url_idx = header.index('website_url') if 'website_url' in header else 2
            download_url_idx = header.index('download_url') if 'download_url' in header else 3

            # Re-parse from the start with the csv module so quoted fields
            # (e.g. URLs containing commas) are handled correctly.
            f.seek(0)
            reader = csv.reader(f)
            next(reader)  # Skip header

            for row in reader:
                # Only accept rows that contain both URL columns.
                if len(row) > max(website_url_idx, download_url_idx):
                    paper = {
                        'id': row[0] if len(row) > 0 else '',
                        'title': row[1] if len(row) > 1 else 'Unknown Title',
                        'website_url': row[website_url_idx] if len(row) > website_url_idx else '',
                        'download_url': row[download_url_idx] if len(row) > download_url_idx else '',
                        'abstract': row[4] if len(row) > 4 else '',
                        'doi': None,  # filled in later by DOI extraction
                        'authors': row[5] if len(row) > 5 else '',
                        'journal': row[6] if len(row) > 6 else '',
                        'publication_date': row[7] if len(row) > 7 else '',
                        'citations': row[8] if len(row) > 8 else 0,
                        'citation_link': row[9] if len(row) > 9 else '',
                        'author_year': row[10] if len(row) > 10 else ''
                    }
                    papers.append(paper)

        print(f"Read {len(papers)} papers from {filename}")
    except FileNotFoundError:
        print(f"File {filename} not found")
    except Exception as e:
        print(f"Error reading {filename}: {e}")
        # Fall back to a plain DictReader pass in case the manual header
        # handling above tripped over an unusual file layout.
        try:
            with open(filename, 'r', newline='', encoding='utf-8') as f:
                reader = csv.DictReader(f)
                for row in reader:
                    paper = {
                        'id': row.get('id', ''),
                        'title': row.get('title', ''),
                        'website_url': row.get('website_url', ''),
                        'download_url': row.get('download_url', ''),
                        'abstract': row.get('abstract', ''),
                        'doi': None,  # filled in later by DOI extraction
                        'authors': row.get('authors', ''),
                        'journal': row.get('journal', ''),
                        'publication_date': row.get('publication_date', ''),
                        'citations': row.get('citations', 0),
                        'citation_link': row.get('citation_link', ''),
                        'author_year': row.get('author_year', '')
                    }
                    papers.append(paper)
            print(f"Read {len(papers)} papers from {filename} using fallback method")
        except Exception as e2:
            print(f"Fallback method also failed: {e2}")

    return papers

def extract_and_write_dois(papers, output_file='data/paper_doi.csv', verbose=False):
    """Extract DOIs from papers and write to a CSV file.

    For each paper this checks once whether its download URL serves a PDF
    (SSRN URLs are skipped as inaccessible), tries every known DOI
    extraction method, and then writes one status row per paper.

    Args:
        papers: List of paper dicts; mutated in place ('doi' is filled in).
        output_file: Destination CSV (paper_id, paper_title, doi, status).
        verbose: Print per-paper diagnostics when True.
    """
    # Create data directory (including parents) if it doesn't exist
    Path(os.path.dirname(output_file)).mkdir(parents=True, exist_ok=True)

    doi_count = 0
    pdf_exists_count = 0
    # Status per paper, recorded during the single check below so the write
    # loop does not have to hit every URL over the network a second time.
    statuses = []

    print("Extracting DOIs from papers...")
    total_papers = len(papers)

    for i, paper in enumerate(papers):
        print(f"Processing paper {i+1}/{total_papers}: {paper['title'][:50]}...")

        # Imported lazily (repeat imports are cheap via sys.modules) so the
        # module stays importable without the web stack.
        from .web_utils import check_pdf_url

        status = "undo"
        try:
            pdf_exists = False
            if paper['download_url']:
                if 'papers.ssrn.com' in paper['download_url']:
                    # SSRN URLs are treated as inaccessible and never fetched.
                    status = "inaccessible"
                    if verbose:
                        print(f"Skipping SSRN URL as it may be inaccessible: {paper['download_url']}")
                else:
                    # Short timeout to avoid stalling on problematic URLs.
                    pdf_exists = check_pdf_url(paper['download_url'], verbose, timeout=5)

            if pdf_exists:
                pdf_exists_count += 1
                status = "pdf exist"
                if verbose:
                    print(f"PDF exists for paper: {paper['title']}")
        except Exception as e:
            if verbose:
                print(f"Error checking if PDF exists: {e}")
        statuses.append(status)

        # Try all DOI extraction methods
        if not paper.get('doi'):
            paper['doi'] = try_multiple_doi_extraction_methods(paper, verbose)

        # Count the successes
        if paper.get('doi'):
            doi_count += 1
            if verbose:
                print(f"Successfully extracted DOI: {paper['doi']}")
        else:
            if verbose:
                print(f"Failed to extract DOI for paper: {paper['title']}")

    # Write to CSV, reusing the status computed above for each paper.
    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['paper_id', 'paper_title', 'doi', 'status'])
        for paper, status in zip(papers, statuses):
            writer.writerow([
                paper['id'],
                paper['title'],
                paper['doi'] or 'Unknown',
                status
            ])

    print(f"DOI information saved to {output_file}")

    # Print summary
    print(f"\nDOI Summary:")
    print(f"Total papers: {len(papers)}")
    print(f"Papers with DOI: {doi_count}")
    print(f"Papers without DOI: {len(papers) - doi_count}")
    print(f"Papers with PDF: {pdf_exists_count}")

def filter_redundant_papers(papers):
    """Drop papers whose titles duplicate an earlier paper's title.

    Deduplication is by exact match on the whitespace-stripped,
    lower-cased title; the first occurrence of each title is kept.

    Args:
        papers: List of paper dicts with at least a 'title' key.

    Returns:
        A new list containing one paper per unique title, in first-seen order.
    """
    # Insertion-ordered dict keyed by normalized title; first occurrence wins.
    seen = {}
    duplicates = 0

    for paper in papers:
        key = paper['title'].strip().lower()
        if key in seen:
            duplicates += 1
        else:
            seen[key] = paper

    unique = list(seen.values())

    print(f"Filtered out {duplicates} redundant papers")
    print(f"Remaining unique papers: {len(unique)}")

    return unique

def write_filtered_papers_to_csv(papers, output_file='data/paper_info_filter.csv'):
    """Write filtered papers to a CSV file.

    Args:
        papers: List of paper dicts; 'id' through 'abstract' are required
            keys, the remaining columns default to empty/0 when absent.
        output_file: Destination CSV path; parent directories are created
            as needed.
    """
    # Create the output directory tree if it doesn't exist. parents=True
    # handles nested paths (the old exist_ok-only call failed on them).
    out_dir = os.path.dirname(output_file)
    if out_dir:
        Path(out_dir).mkdir(parents=True, exist_ok=True)

    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['id', 'title', 'website_url', 'download_url', 'abstract', 'authors', 'journal', 'publication_date', 'citations', 'citation_link', 'author_year'])
        for paper in papers:
            writer.writerow([
                paper['id'],
                paper['title'],
                paper['website_url'],
                paper['download_url'],
                paper['abstract'],
                paper.get('authors', ''),
                paper.get('journal', ''),
                paper.get('publication_date', ''),
                paper.get('citations', 0),
                paper.get('citation_link', ''),
                paper.get('author_year', '')
            ])

    print(f"Filtered paper information saved to {output_file}")

def list_failed_downloads(status_file='data/paper_status.csv', verbose=False):
    """List papers that failed to download for retry.

    Reads the status CSV, prints DOI/failure statistics, and writes a
    retry_downloads.csv next to the status file listing the failed papers.

    Args:
        status_file: CSV with paper_id/paper_title/DOI/status columns.
        verbose: Accepted for interface consistency with the other helpers.
    """
    if not os.path.exists(status_file):
        print(f"Status file {status_file} not found")
        return

    # Index papers by ID; a later row for the same ID overwrites earlier ones.
    papers = {}

    with open(status_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            paper_id = row.get('paper_id', '')
            papers[paper_id] = {
                'title': row.get('paper_title', ''),
                'doi': row.get('DOI', ''),
                'status': row.get('status', '')
            }

    # Partition the papers for the statistics below.
    failed_papers = [p for p in papers.values() if p['status'] == 'download failed']
    unknown_doi_papers = [p for p in papers.values()
                          if p['doi'] == 'Unknown' or not p['doi']]
    failed_with_doi = [p for p in failed_papers
                       if p['doi'] != 'Unknown' and p['doi']]
    failed_without_doi = [p for p in failed_papers
                          if p['doi'] == 'Unknown' or not p['doi']]

    # Print statistics. Guard every percentage against division by zero:
    # the file may be empty, or may contain papers but no failures.
    print("\nDOI Statistics:")
    print(f"Total papers: {len(papers)}")
    if papers:
        print(f"Papers with unknown DOI: {len(unknown_doi_papers)} ({len(unknown_doi_papers)/len(papers)*100:.1f}%)")
        print(f"Papers that failed to download: {len(failed_papers)} ({len(failed_papers)/len(papers)*100:.1f}%)")
    if failed_papers:
        print(f"  - Failed with known DOI: {len(failed_with_doi)} ({len(failed_with_doi)/len(failed_papers)*100:.1f}% of failures)")
        print(f"  - Failed with unknown DOI: {len(failed_without_doi)} ({len(failed_without_doi)/len(failed_papers)*100:.1f}% of failures)")
    else:
        print("\nNo failed downloads found.")
        return

    # Print failed papers as a fixed-width table.
    print(f"\nFound {len(failed_papers)} papers that failed to download:\n")
    print(f"{'Title':<80} {'DOI':<30}")
    print(f"{'-' * 80} {'-' * 30}")

    for paper in failed_papers:
        title = paper['title']
        doi = paper['doi']

        # Truncate long titles so the table stays aligned.
        if len(title) > 77:
            title = title[:77] + "..."

        print(f"{title:<80} {doi:<30}")

    # Save the failed papers to a CSV for easy retry
    retry_file = os.path.join(os.path.dirname(status_file), 'retry_downloads.csv')
    with open(retry_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['paper_title', 'DOI'])
        for paper in failed_papers:
            writer.writerow([paper['title'], paper['doi']])

    print(f"\nRetry information saved to {retry_file}")
    print("\nYou can manually download these papers or use other tools to fetch them.")

def update_paper_status(paper_id, new_status, status_file='data/paper_status.csv'):
    """Update the status of a paper in the status file.

    Args:
        paper_id: ID of the paper whose row should be updated.
        new_status: Replacement value for that row's 'status' column.
        status_file: Path to the status CSV.

    Returns:
        True if the file existed and was rewritten, False if it is missing.
        (True does not imply the paper_id was actually found.)
    """
    if not os.path.exists(status_file):
        return False

    # Read all records, updating the matching row(s) in memory.
    records = []
    fieldnames = ['paper_id', 'paper_title', 'DOI', 'status']
    with open(status_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        # Preserve whatever columns the file actually has; hard-coding the
        # four standard names would make DictWriter reject extra columns.
        if reader.fieldnames:
            fieldnames = reader.fieldnames
        for row in reader:
            if row.get('paper_id') == paper_id:
                row['status'] = new_status
            records.append(row)

    # Write back all records
    with open(status_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(records)

    return True

def try_arxiv_for_failed_papers(status_file='data/paper_status.csv', download_dir='papers', verbose=False):
    """Try to download papers that failed to download by searching arXiv.

    Reads the status CSV, collects rows whose status is 'download failed',
    searches arXiv by title for each, downloads any match into download_dir,
    validates the PDF, and updates the status file on success.

    Args:
        status_file: CSV with paper_id/paper_title/DOI/status columns.
        download_dir: Directory downloaded PDFs are saved into.
        verbose: Print extra diagnostics when True.
    """
    # Hoisted out of the per-paper loop (was re-imported every iteration).
    import re

    if not os.path.exists(status_file):
        print(f"Status file {status_file} not found")
        return

    # Create download directory (including parents) if it doesn't exist
    Path(download_dir).mkdir(parents=True, exist_ok=True)

    # Read the status file
    failed_papers = []
    with open(status_file, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            if row.get('status') == 'download failed':
                failed_papers.append({
                    'id': row.get('paper_id', ''),
                    'title': row.get('paper_title', ''),
                    'doi': row.get('DOI', '')
                })

    if not failed_papers:
        print("No failed downloads found to retry")
        return

    print(f"Found {len(failed_papers)} papers that failed to download. Searching arXiv...")

    # Track success and failures
    success_count = 0
    still_failed = []

    # Imported lazily so the module loads without the web stack installed.
    from .web_utils import search_arxiv_by_title
    from .network import create_session_with_retries
    from .file_utils import is_valid_pdf_file

    # Try to download each paper from arXiv
    for i, paper in enumerate(failed_papers):
        print(f"Processing paper {i+1}/{len(failed_papers)}: {paper['title'][:50]}...")

        # Search arXiv for the paper
        pdf_url = search_arxiv_by_title(paper['title'], verbose)

        if pdf_url:
            print(f"Found on arXiv: {pdf_url}")

            # Strip characters that are unsafe in filenames; cap length at 100.
            sanitized_title = re.sub(r'[\\/*?:"<>|]', "_", paper['title'])
            filename = os.path.join(download_dir, f"{sanitized_title[:100]}.pdf")

            try:
                session = create_session_with_retries()
                response = session.get(pdf_url, stream=True, timeout=5)
                response.raise_for_status()

                # Stream to disk in chunks to avoid holding the PDF in memory.
                with open(filename, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)

                # Verify the downloaded file is a valid PDF
                if is_valid_pdf_file(filename, verbose):
                    print(f"Successfully downloaded from arXiv to: {filename}")
                    success_count += 1

                    # Update status in the status file
                    update_paper_status(paper['id'], 'downloaded', status_file=status_file)
                else:
                    print(f"Downloaded file from arXiv is not a valid PDF: {filename}")
                    os.remove(filename)
                    still_failed.append(paper)

            except Exception as e:
                print(f"Failed to download from arXiv: {e}")
                # Clean up any partially downloaded file
                if os.path.exists(filename):
                    os.remove(filename)
                still_failed.append(paper)
        else:
            print(f"Paper not found on arXiv")
            still_failed.append(paper)

    # Print summary
    print("\narXiv Download Summary:")
    print(f"Total papers attempted: {len(failed_papers)}")
    print(f"Successfully downloaded: {success_count}")
    print(f"Still failed: {len(still_failed)}")

    if success_count > 0:
        print("\nValidating all downloaded PDF files...")
        from .file_utils import validate_downloaded_pdfs
        validate_downloaded_pdfs(download_dir=download_dir, verbose=verbose)
def import_pdf_directory(directory_path, download_dir='papers', verbose=False):
    """Import every PDF file found (recursively) under a directory.

    Args:
        directory_path: Directory to scan for *.pdf files.
        download_dir: Destination passed through to import_pdf_file.
        verbose: Passed through to import_pdf_file.

    Returns:
        True if at least one file was imported successfully, else False.
    """
    if not os.path.exists(directory_path):
        print(f"Error: Directory not found: {directory_path}")
        return False

    # Collect every *.pdf in the directory tree (case-insensitive suffix).
    pdf_files = []
    try:
        for root, _, names in os.walk(directory_path):
            pdf_files.extend(
                os.path.join(root, name)
                for name in names
                if name.lower().endswith('.pdf')
            )
    except Exception as e:
        print(f"Error scanning directory: {e}")
        return False

    if not pdf_files:
        print(f"No PDF files found in directory: {directory_path}")
        return False

    print(f"Found {len(pdf_files)} PDF files in {directory_path}")

    success_count = 0
    failed_count = 0

    # Import each file, counting failures without aborting the batch.
    for index, pdf_path in enumerate(pdf_files, start=1):
        print(f"Processing file {index}/{len(pdf_files)}: {os.path.basename(pdf_path)}")
        try:
            imported = import_pdf_file(pdf_path, download_dir, verbose)
        except Exception as e:
            print(f"Error processing {pdf_path}: {e}")
            failed_count += 1
        else:
            if imported:
                success_count += 1
            else:
                failed_count += 1

    # Print summary
    print("\nImport Summary:")
    print(f"Total PDF files found: {len(pdf_files)}")
    print(f"Successfully imported: {success_count}")
    print(f"Failed to import: {failed_count}")

    return success_count > 0

def import_pdf_file(pdf_path, download_dir='papers', verbose=False):
    """Import a single PDF file, identify its title, and update the status.

    Extracts the paper title from the PDF (falling back to the filename),
    copies the file into download_dir, and records the paper in
    paper_status.csv (and paper_info.csv when it is a brand-new paper).

    Args:
        pdf_path: Path of the PDF file to import.
        download_dir: Directory the PDF is copied into.
        verbose: Print extra diagnostics when True.

    Returns:
        True on success; False if the file is missing, not a valid PDF,
        or the copy fails.
    """
    # Hoisted from mid-function so all local imports are in one place.
    import re
    import shutil

    if not os.path.exists(pdf_path):
        print(f"Error: PDF file not found: {pdf_path}")
        return False

    # Create download directory (including parents) if it doesn't exist
    Path(download_dir).mkdir(parents=True, exist_ok=True)

    # Extract title from PDF
    title = extract_title_from_pdf(pdf_path, verbose)
    if not title:
        print("Could not extract title from PDF. Using filename as title.")
        filename = os.path.basename(pdf_path)
        title = os.path.splitext(filename)[0].replace('_', ' ').strip()

    # Check if it's a valid PDF
    from .file_utils import is_valid_pdf_file

    if not is_valid_pdf_file(pdf_path, verbose):
        print(f"The file is not a valid PDF: {pdf_path}")
        return False

    # Strip characters that are unsafe in filenames; cap length at 100.
    sanitized_title = re.sub(r'[\\/*?:"<>|]', "_", title)
    new_filename = os.path.join(download_dir, f"{sanitized_title[:100]}.pdf")

    # Copy the file to the papers directory (copy2 preserves metadata)
    try:
        shutil.copy2(pdf_path, new_filename)
        print(f"Imported PDF to: {new_filename}")
    except Exception as e:
        print(f"Error copying PDF file: {e}")
        return False

    # Find if the paper already exists in paper_info.csv and paper_status.csv
    paper_id = None
    paper_exists = False

    # Determine paths for paper_info.csv and paper_status.csv
    data_dir = os.path.dirname(download_dir) if download_dir != 'papers' else 'data'
    paper_info_file = os.path.join(data_dir, 'paper_info.csv')
    paper_status_file = os.path.join(data_dir, 'paper_status.csv')

    # Match against existing titles by similarity.
    if os.path.exists(paper_info_file):
        with open(paper_info_file, 'r', newline='', encoding='utf-8') as f:
            reader = csv.reader(f)
            headers = next(reader)
            title_idx = headers.index('title') if 'title' in headers else 1

            for row in reader:
                if len(row) > title_idx:
                    csv_title = row[title_idx]
                    similarity = title_similarity(title, csv_title)

                    # >0.8 similarity: treat near-identical titles as the
                    # same paper.
                    if similarity > 0.8:
                        paper_id = row[0]
                        paper_exists = True
                        if verbose:
                            print(f"Found matching paper in database with ID: {paper_id}")
                        break

    # If paper not found, generate a new short ID
    if not paper_id:
        paper_id = str(uuid.uuid4())[:8]
        if verbose:
            print(f"Generated new paper ID: {paper_id}")

    # Update or add to paper_status.csv
    status_updated = False
    if paper_exists and os.path.exists(paper_status_file):
        # Update existing status
        status_updated = update_paper_status(paper_id, 'downloaded', status_file=paper_status_file)
        if status_updated and verbose:
            print(f"Updated status for paper ID: {paper_id}")

    # If not updated, add a new entry
    if not status_updated:
        if not os.path.exists(paper_status_file):
            # Create the status file with its header first
            Path(data_dir).mkdir(parents=True, exist_ok=True)
            with open(paper_status_file, 'w', newline='', encoding='utf-8') as f:
                writer = csv.writer(f)
                writer.writerow(['paper_id', 'paper_title', 'DOI', 'status'])

        # Append the new paper to the status file
        with open(paper_status_file, 'a', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerow([paper_id, title, 'Unknown', 'downloaded'])

        if verbose:
            print(f"Added new entry to status file with ID: {paper_id}")

    # If the paper doesn't exist in paper_info.csv, add it. The row must
    # have all 11 columns of the paper_info schema (the previous version
    # omitted author_year, misaligning later reads of the file).
    if not paper_exists and os.path.exists(paper_info_file):
        with open(paper_info_file, 'a', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerow([
                paper_id,
                title,
                '',  # website_url
                '',  # download_url
                '',  # abstract
                '',  # authors
                '',  # journal
                '',  # publication_date
                0,   # citations
                '',  # citation_link
                ''   # author_year
            ])

        if verbose:
            print(f"Added new entry to paper info file with ID: {paper_id}")

    print(f"Successfully imported PDF: {title}")
    return True