#!/usr/bin/env python3
"""
File utilities for handling PDFs and other files.
"""

import os
import re
from pathlib import Path
import json
import time

# Check if PDF parsing libraries are available
try:
    import PyPDF2
    from pdfminer.high_level import extract_text as pdf_extract_text
    PDF_PARSING_AVAILABLE = True
except ImportError:
    PDF_PARSING_AVAILABLE = False
    print("Warning: PDF parsing libraries not available. Install PyPDF2 and pdfminer.six for enhanced PDF processing.")

def is_valid_pdf_file(file_path, verbose=False):
    """Check if a file is actually a valid PDF file."""
    if not os.path.exists(file_path):
        return False

    # Real PDFs are rarely under 1KB; reject tiny files outright.
    file_size = os.path.getsize(file_path)
    if file_size < 1000:
        if verbose:
            print(f"File is suspiciously small: {file_size} bytes")
        return False

    # Verify the %PDF- magic bytes; peek further to spot HTML error pages
    # saved with a .pdf extension (verbose diagnostics only — both paths fail).
    try:
        with open(file_path, 'rb') as fh:
            head = fh.read(1000)
        header = head[:5]
        if header != b'%PDF-':
            if verbose:
                print(f"File doesn't have PDF signature, found: {header}")
            content_start = head.lower()
            if b'<!doctype html>' in content_start or b'<html' in content_start:
                if verbose:
                    print(f"File appears to be HTML, not PDF")
            return False
    except Exception as e:
        if verbose:
            print(f"Error checking PDF file: {e}")
        return False

    if not PDF_PARSING_AVAILABLE:
        # Header check passed and no parser is installed: assume it's a PDF.
        return True

    # Confirm the structure parses: counting pages forces PyPDF2 to walk
    # the document and fails on truncated/corrupt files.
    try:
        fh = open(file_path, 'rb')
    except Exception as e:
        if verbose:
            print(f"Error opening file with PyPDF2: {e}")
        return False
    with fh:
        try:
            page_total = len(PyPDF2.PdfReader(fh).pages)
        except Exception as e:
            if verbose:
                print(f"PyPDF2 couldn't parse the file: {e}")
            return False
    if verbose:
        print(f"Valid PDF with {page_total} pages")
    return True

def validate_downloaded_pdfs(download_dir='papers', cache_file='data/pdf_validation_cache.json', verbose=False):
    """Check all downloaded PDF files and record their validation status.

    Results are cached per filename in ``cache_file``; files whose mtime
    has not changed since the last check are skipped. If invalid PDFs are
    found, the user is interactively prompted (via input()) to delete them.

    Args:
        download_dir: Directory containing the downloaded ``.pdf`` files.
        cache_file: JSON file used to persist per-file validation results.
        verbose: If True, print per-file progress details.

    Returns:
        A ``(valid_pdfs, invalid_pdfs)`` tuple of filename lists, or
        ``None`` when the directory is missing or holds no PDF files.
    """
    if not os.path.exists(download_dir):
        print(f"Download directory {download_dir} does not exist")
        return

    # Create the cache's parent directory if needed. parents=True so a
    # nested path like 'a/b/cache.json' works even when 'a' is missing
    # (the previous mkdir(exist_ok=True) alone failed in that case).
    cache_dir = os.path.dirname(cache_file)
    if cache_dir:
        Path(cache_dir).mkdir(parents=True, exist_ok=True)

    # Load existing cache if it exists; a corrupt cache is treated as empty.
    cache = {}
    if os.path.exists(cache_file):
        try:
            with open(cache_file, 'r', encoding='utf-8') as f:
                cache = json.load(f)
        except Exception as e:
            if verbose:
                print(f"Error loading cache file: {e}")
            cache = {}

    # Get all PDF files
    pdf_files = [f for f in os.listdir(download_dir) if f.lower().endswith('.pdf')]

    if not pdf_files:
        print(f"No PDF files found in {download_dir}")
        return

    print(f"Checking {len(pdf_files)} PDF files...")

    # Check each file
    valid_pdfs = []
    invalid_pdfs = []

    for pdf_file in pdf_files:
        file_path = os.path.join(download_dir, pdf_file)

        # Check file modification time
        mod_time = os.path.getmtime(file_path)

        # Reuse the cached verdict when the file is unchanged since the
        # last check. Malformed cache entries (missing keys, wrong type)
        # are ignored and the file is simply re-checked, instead of the
        # previous behavior of raising KeyError.
        entry = cache.get(pdf_file)
        if isinstance(entry, dict) and entry.get('last_checked', 0) >= mod_time:
            if verbose:
                print(f"Skipping unchanged file: {pdf_file}")

            if entry.get('is_valid'):
                valid_pdfs.append(pdf_file)
            else:
                invalid_pdfs.append(pdf_file)
            continue

        if verbose:
            print(f"Checking file: {pdf_file}")

        is_valid = is_valid_pdf_file(file_path, verbose)

        # Update cache
        cache[pdf_file] = {
            'is_valid': is_valid,
            'last_checked': time.time(),
            'file_size': os.path.getsize(file_path)
        }

        if is_valid:
            valid_pdfs.append(pdf_file)
        else:
            invalid_pdfs.append(pdf_file)

    # Persist the updated cache for the next run.
    with open(cache_file, 'w', encoding='utf-8') as f:
        json.dump(cache, f, indent=2)

    # Report results
    print(f"\nPDF Validation Summary:")
    print(f"Total files checked: {len(pdf_files)}")
    print(f"Valid PDFs: {len(valid_pdfs)}")
    print(f"Invalid PDFs: {len(invalid_pdfs)}")

    if invalid_pdfs:
        print("\nInvalid PDF files:")
        for pdf in invalid_pdfs:
            print(f"  - {pdf}")

        # Offer to delete invalid PDFs
        if input("\nWould you like to delete invalid PDF files? (y/n): ").lower() == 'y':
            deleted_count = 0
            for pdf in invalid_pdfs:
                try:
                    os.remove(os.path.join(download_dir, pdf))
                    deleted_count += 1
                except Exception as e:
                    print(f"Error deleting {pdf}: {e}")

            print(f"Deleted {deleted_count} invalid PDF files")

    return valid_pdfs, invalid_pdfs

# Regexes matching boilerplate/front-matter frequently mistaken for titles.
_TITLE_BLACKLIST_PATTERNS = [
    r"see discussions?",
    r"https?://www\.researchgate\.net",
    r"this publication at",
    r"profiles? for this",
    r"author profiles",
    r"copyright\s+\d{4}",
    r"all rights reserved",
    r"^\d+$",  # Just a number
    r"^page \d+$",  # Just a page number
    r"^[0-9\-\.]+$",  # Just numbers and punctuation
    r"^proceedings of",
    r"^preprint\s+",
    r"^paper\s+\d+",
    r"^abstract\s*$",
    r"^--manuscript draft--$",  # Common in drafts
    r"^manuscript number",      # Manuscript metadata
    r"^article type",          # Article metadata
    r"^keywords",              # Keywords section
    r"^powered by",            # Publishing system info
    r"insert your title here",  # Common placeholder
    r"title of (your|the) paper", # Common placeholder
    r"your paper title",       # Common placeholder
    r"untitled",               # Common placeholder
    r"click here to edit",     # Common placeholder
]

# Exact placeholder strings from manuscript templates (compared lowercased).
_TEMPLATE_TITLES = [
    "insert your title here",
    "your paper title",
    "title of your paper",
    "untitled document",
    "click here to edit title",
    "enter title here",
    "title goes here",
    "template",
]

def _is_blacklisted_title(text):
    """Return True when text is empty/too short or matches known boilerplate."""
    if not text or len(text) < 5:
        return True
    lowered = text.lower()
    if lowered in _TEMPLATE_TITLES:
        return True
    return any(re.search(pattern, lowered) for pattern in _TITLE_BLACKLIST_PATTERNS)

def _clean_text_lines(text):
    """Split extracted page text into stripped lines of plausible title length."""
    lines = [line.strip() for line in text.split('\n')]
    return [line for line in lines if line and 10 < len(line) < 250]

def _multiline_title_candidates(lines):
    """Score runs of consecutive title-like lines near the top of a page.

    Consecutive title-like lines (title case, ALL CAPS, or capitalized
    without sentence punctuation) within the first 10 lines are merged
    with ' - ' into one candidate; overly long merges fall back to the
    first line of the run. Returns a list of (title, score) tuples.
    """
    found = []
    run = []
    window = lines[:10]

    def _flush(end_idx):
        # Only runs of 2+ lines count as a multiline title.
        if len(run) > 1:
            merged = ' - '.join(run)
            if len(merged) > 120:
                merged = run[0]
            # Earlier/longer runs score higher.
            found.append((merged, 15 - end_idx + len(run)))

    for idx, line in enumerate(window):
        if _is_blacklisted_title(line):
            continue
        looks_like_title = (line.istitle() or line.isupper() or
                            re.match(r'^[A-Z][^.!?]*[A-Za-z]$', line))
        if looks_like_title:
            run.append(line)
        elif run:  # a non-title line ends the current run
            _flush(idx)
            run = []
    # Flush a run that reaches the end of the window.
    _flush(len(window))
    return found

def _journal_article_candidate(lines):
    """Combine the top two non-blacklisted lines as 'journal - article'.

    Academic front pages often place the journal name above the article
    title; a third line is appended when it looks like a subtitle rather
    than author/abstract front matter. Returns (title, 18) or None.
    """
    top = [line for line in lines[:5] if not _is_blacklisted_title(line)]
    if len(top) < 2:
        return None
    article = top[1]
    if len(top) >= 3 and len(top[2]) > 10:
        if not re.match(r'^(by|author|abstract|introduction|keywords)', top[2].lower()):
            article += " - " + top[2]
    return (f"{top[0]} - {article}", 18)

def _first_page_candidates(lines):
    """Collect candidates that only make sense on the first page."""
    out = []
    combo = _journal_article_candidate(lines)
    if combo:
        out.append(combo)
    # Earlier lines on the first page are more likely to be the title.
    for idx, line in enumerate(lines[:5]):
        if not _is_blacklisted_title(line):
            out.append((line, 10 - idx))
    out.extend(_multiline_title_candidates(lines))
    return out

def _pattern_candidates(lines):
    """Per-line heuristics: title-cased lines and explicit 'Title:' tags."""
    out = []
    for line in lines:
        if re.match(r'^[A-Z][^.!?]*[A-Za-z]$', line) and not _is_blacklisted_title(line):
            # Title case or ALL CAPS are stronger indicators.
            out.append((line, 8 if (line.istitle() or line.isupper()) else 5))
        title_pattern = re.search(r'(?:TITLE|Title):\s*([^\n]+)', line, re.IGNORECASE)
        if title_pattern:
            candidate = title_pattern.group(1).strip()
            if not _is_blacklisted_title(candidate):
                out.append((candidate, 12))  # explicit title marker
    return out

def _submission_candidates(lines):
    """Extract quoted titles from submission cover letters ('... entitled "X"')."""
    out = []
    for line in lines:
        if "submit" in line.lower() and "entitled" in line.lower():
            match = re.search(r'entitled:?\s*"([^"]+)"', line, re.IGNORECASE)
            if match:
                candidate = match.group(1).strip()
                if not _is_blacklisted_title(candidate):
                    out.append((candidate, 12))
    return out

def extract_title_from_pdf(pdf_path, verbose=False):
    """Extract title from a PDF file.

    Gathers scored title candidates from PDF metadata, a PyPDF2 pass over
    the first three pages, and a pdfminer pass over the first two pages,
    then returns the highest-scoring candidate (ties broken by shorter
    length). Falls back to a cleaned-up filename, or None.

    Args:
        pdf_path: Path to the PDF file.
        verbose: If True, print extraction diagnostics and all candidates.

    Returns:
        The best title string found, or None when nothing usable exists.
    """
    if not PDF_PARSING_AVAILABLE:
        print("PDF parsing libraries not available. Cannot extract title.")
        return None

    if verbose:
        print(f"Attempting to extract title from PDF: {pdf_path}")

    # Check if it's a valid PDF
    if not is_valid_pdf_file(pdf_path, verbose):
        print(f"The file is not a valid PDF: {pdf_path}")
        return None

    all_candidates = []

    # Pass 1: PyPDF2 — metadata plus heuristics over the first three pages.
    try:
        with open(pdf_path, 'rb') as f:
            pdf_reader = PyPDF2.PdfReader(f)

            # Metadata title is a strong candidate (15) but not decisive:
            # page-content heuristics may still find a better one (18).
            if pdf_reader.metadata:
                meta_title = pdf_reader.metadata.get('/Title')
                if (meta_title and isinstance(meta_title, str)
                        and len(meta_title) > 5 and not _is_blacklisted_title(meta_title)):
                    if verbose:
                        print(f"Found title in PDF metadata: '{meta_title}'")
                    all_candidates.append((meta_title, 15))

            for i in range(min(3, len(pdf_reader.pages))):
                text = pdf_reader.pages[i].extract_text()
                if not text:
                    continue
                lines = _clean_text_lines(text)
                if i == 0 and lines:
                    all_candidates.extend(_first_page_candidates(lines))
                all_candidates.extend(_pattern_candidates(lines))
                # BUGFIX: the submission scan previously ran after the page
                # loop, so it only saw the last page's lines (and raised a
                # silently-caught NameError when no page yielded text).
                all_candidates.extend(_submission_candidates(lines))
    except Exception as e:
        if verbose:
            print(f"Error extracting title with PyPDF2: {e}")

    # Pass 2: pdfminer as an independent cross-check on the first two pages.
    try:
        with open(pdf_path, 'rb') as f:
            text = pdf_extract_text(f, maxpages=2)
        if not text:
            if verbose:
                print("No text extracted with pdfminer")
        else:
            lines = _clean_text_lines(text)
            all_candidates.extend(_first_page_candidates(lines))
            all_candidates.extend(_submission_candidates(lines))
            all_candidates.extend(_pattern_candidates(lines))
    except Exception as e:
        if verbose:
            print(f"Error extracting title with pdfminer: {e}")

    # Pick the highest-scoring candidate; shorter titles win ties.
    if all_candidates:
        all_candidates.sort(key=lambda c: (-c[1], len(c[0])))
        if verbose:
            print("All title candidates:")
            for i, (candidate, score) in enumerate(all_candidates):
                print(f"  {i+1}. Score {score}: '{candidate}'")
        title = all_candidates[0][0]
        if verbose:
            print(f"Selected title: '{title}'")
        return title

    # Last resort: derive a title from the filename, stripping IDs/timestamps.
    try:
        filename_without_ext = os.path.splitext(os.path.basename(pdf_path))[0]
        if filename_without_ext:
            # Remove patterns like timestamps, UUIDs, or arXiv-style paper IDs.
            cleaned = re.sub(r'\b([0-9a-f]{8,}|[A-Z0-9]+-\d+|\d{4}\.\d+v\d+)\b', '', filename_without_ext)
            # Collapse separators into spaces.
            cleaned = re.sub(r'[_\-\.]+', ' ', cleaned).strip()
            if cleaned and len(cleaned) > 3:
                title = cleaned
                if verbose:
                    print(f"Using cleaned filename as title: '{title}'")
                return title
            title = filename_without_ext.replace('_', ' ').strip()
            if verbose:
                print(f"Using filename as title: '{title}'")
            return title
    except Exception as e:
        if verbose:
            print(f"Error using filename as title: {e}")

    if verbose:
        print("Could not extract a title from the PDF")
    return None

def title_similarity(title1, title2):
    """Return the fraction of shared words between two titles (0..1).

    Titles are lowercased and split on whitespace into word sets; the
    overlap size is divided by the size of the larger set. Returns 0 when
    either title is None/empty or yields no words.
    """
    if not title1 or not title2:
        return 0

    words_a = set(title1.lower().split())
    words_b = set(title2.lower().split())
    if not words_a or not words_b:
        return 0

    shared = words_a & words_b
    return len(shared) / max(len(words_a), len(words_b))