#!/usr/bin/env python3
"""
Script to find papers containing ORISE-related terms and save them to a new CSV file.
"""

import csv
import os
import argparse
from pathlib import Path
from collections import Counter

def save_classified_papers(papers, output_file='data/orise_papers_by_year.csv'):
    """
    Save papers to a CSV file, sorted by publication date and numbered.

    Args:
        papers: List of paper dicts (e.g. rows from csv.DictReader). May be empty.
        output_file: Destination CSV path; its parent directory is created if needed.

    A 1-based 'num_id' column is prepended to each paper's original fields.
    """
    # Guard the empty case: papers[0] below would raise IndexError.
    if not papers:
        print(f"\nNo papers to save; skipped writing {output_file}")
        return

    # Ensure the destination directory exists (consistent with the other writers).
    Path(output_file).parent.mkdir(parents=True, exist_ok=True)

    # 'num_id' first, then the paper's own columns in their original order.
    fieldnames = ['num_id'] + [field for field in papers[0].keys() if field != 'num_id']

    # Sort by publication date (string comparison; missing dates sort first).
    sorted_papers = sorted(papers, key=lambda x: x.get('publication_date', ''))

    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()

        for idx, paper in enumerate(sorted_papers, 1):
            row = {'num_id': idx}
            row.update({k: v for k, v in paper.items() if k != 'num_id'})
            writer.writerow(row)

    print(f"\nClassified papers saved to: {output_file}")

def analyze_papers_by_year(papers):
    """
    Print a year-by-year breakdown of the given papers.

    Papers without a 'publication_date' field are tallied under 'Unknown'.
    """
    # Tally how many papers fall under each publication year.
    year_counts = Counter(paper.get('publication_date', 'Unknown') for paper in papers)

    print("\nPapers by Year:")
    print("-" * 40)
    # Iterate in chronological order (plain string sort of the year keys).
    for year, count in sorted(year_counts.items()):
        print(f"{year}: {count} papers")
    print("-" * 40)
    print(f"Total: {sum(year_counts.values())} papers")

def is_arxiv_only_paper(paper):
    """Return True when the paper's URL is on arXiv and it has no non-arXiv journal."""
    url = paper.get('website_url', '').lower()
    venue = paper.get('journal', '').lower()
    # Papers not hosted on arXiv are never arXiv-only.
    if 'arxiv' not in url:
        return False
    # arXiv-only means no journal at all, or the journal itself is arXiv.
    return not venue or 'arxiv' in venue

def save_arxiv_papers(papers, output_file='data/arxiv_only_papers.csv'):
    """
    Save arXiv-only papers to a separate CSV file and print their titles.

    Args:
        papers: List of paper dicts; the function is a no-op when empty.
        output_file: Destination CSV path; its parent directory is created.

    A 1-based 'num_id' and a 'year' column (copied from 'publication_date')
    are prepended to each paper's own fields.
    """
    if not papers:
        return

    # Create the actual destination directory, not a hard-coded 'data/',
    # so custom output paths also work.
    Path(output_file).parent.mkdir(parents=True, exist_ok=True)

    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        # Exclude 'year' as well as 'num_id' from the appended fields so a
        # pre-existing 'year' key cannot produce a duplicate header column.
        fieldnames = ['num_id', 'year'] + [
            field for field in papers[0].keys() if field not in ('num_id', 'year')
        ]
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()

        for idx, paper in enumerate(papers, 1):
            row = {
                'num_id': idx,
                # The 'year' column mirrors the paper's publication date.
                'year': paper.get('publication_date', 'Unknown'),
            }
            # Skip 'year' here too so the computed value is not clobbered.
            row.update({k: v for k, v in paper.items() if k not in ('num_id', 'year')})
            writer.writerow(row)

    print(f"\nFiltered arXiv papers saved to: {output_file}")
    print("\nFiltered arXiv papers:")
    print("-" * 80)
    for idx, paper in enumerate(papers, 1):
        # .get avoids a KeyError on rows that lack a title column.
        print(f"{idx}. {paper.get('title', '')}")

def read_paper_info_csv(filename):
    """
    Read paper rows from a CSV file.

    Args:
        filename: Path to a CSV file with a header row.

    Returns:
        List of dicts, one per data row; an empty list if the file is missing.
    """
    if not Path(filename).exists():
        # Bug fix: previously printed the literal text "(unknown)" instead
        # of the missing file's actual name.
        print(f"Error: Input file {filename} not found")
        return []

    with open(filename, 'r', encoding='utf-8') as f:
        return list(csv.DictReader(f))

def write_filtered_papers_to_csv(papers, output_file):
    """
    Write papers to a CSV file using the first paper's keys as the header.

    Args:
        papers: List of paper dicts. When empty, nothing is written and no
            file is created (previously an empty, headerless file was left
            behind because the check ran after open()).
        output_file: Destination CSV path; parent directories are created.
    """
    # Check before opening so an empty input leaves no stray zero-row file.
    if not papers:
        return

    # parents=True so nested output directories are created as needed.
    Path(output_file).parent.mkdir(parents=True, exist_ok=True)

    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=papers[0].keys())
        writer.writeheader()
        writer.writerows(papers)

def analyze_orise_papers(input_file='data/orise_papers_by_year.csv', output_file='data/orise_papers_filtered.csv', no_arxiv=False):
    """
    Analyze papers from the input CSV file and save filtered results.

    Args:
        input_file: CSV of papers (as written by save_classified_papers).
        output_file: Where to write the remaining papers when filtering.
        no_arxiv: When True, split arXiv-only papers into a side file and
            save the remainder to output_file; otherwise rewrite the
            classified-by-year file.
    """
    papers = read_paper_info_csv(input_file)
    if not papers:
        print(f"No papers found in {input_file}")
        return

    # Filter out arXiv-only papers if requested
    if no_arxiv:
        # Partition into arXiv-only papers and everything else.
        arxiv_papers = [p for p in papers if is_arxiv_only_paper(p)]
        papers = [p for p in papers if not is_arxiv_only_paper(p)]
        print(f"Filtered out {len(arxiv_papers)} arXiv-only papers:")
        print("-" * 80)
        for idx, paper in enumerate(arxiv_papers, 1):
            # Bug fix: input rows carry 'publication_date', not 'year'
            # (see save_classified_papers), so paper['year'] raised KeyError.
            # .get also protects against rows missing any of these columns.
            year = paper.get('year', paper.get('publication_date', 'Unknown'))
            print(f"{idx}. {paper.get('title', '')} ({year})")
            print(f"   Authors: {paper.get('authors', '')}")
            print(f"   URL: {paper.get('website_url', '')}")
            print()

        # Save arXiv papers to a separate file alongside the main output.
        if arxiv_papers:
            arxiv_output = os.path.join(os.path.dirname(output_file), 'arxiv_only_papers.csv')
            write_filtered_papers_to_csv(arxiv_papers, arxiv_output)
            print(f"ArXiv-only papers saved to: {arxiv_output}")
        print("-" * 80)

        # Save filtered papers to output_file when using --no-arxiv
        write_filtered_papers_to_csv(papers, output_file)
        print(f"Found {len(papers)} papers, saved to {output_file}")
    else:
        # When not using --no-arxiv, save to orise_papers_by_year.csv
        save_classified_papers(papers)
        print(f"Found {len(papers)} papers")

    # Analyze papers by year
    analyze_papers_by_year(papers)

def main():
    """Parse command-line options and run the ORISE paper analysis."""
    parser = argparse.ArgumentParser(description='Analyze ORISE papers')
    parser.add_argument(
        '--no-arxiv',
        action='store_true',
        help='Exclude papers that only come from arXiv',
    )
    parser.add_argument(
        '--input',
        default='data/orise_papers_by_year.csv',
        help='Input CSV file',
    )
    parser.add_argument(
        '--output',
        default='data/orise_papers_filtered.csv',
        help='Output CSV file',
    )
    options = parser.parse_args()
    analyze_orise_papers(options.input, options.output, options.no_arxiv)

# Run the CLI entry point only when executed as a script (not on import).
if __name__ == '__main__':
    main() 