import pandas as pd
from difflib import SequenceMatcher
import argparse
from pathlib import Path

def check_duplicate_ids(df):
    """Report papers that share the same 'id' value.

    Prints the offending (id, title) rows when duplicates exist,
    otherwise prints a confirmation line. Returns None.
    """
    dup_mask = df.duplicated('id', keep=False)
    if dup_mask.any():
        print("\n[!] Duplicate IDs found:")
        print(df.loc[dup_mask, ['id', 'title']].to_string(index=False))
    else:
        print("\n[✓] No duplicate IDs found.")

def check_duplicate_titles(df):
    """Check for exact-duplicate and near-duplicate paper titles.

    First prints rows with exactly duplicated titles, then compares every
    pair of titles with difflib.SequenceMatcher and reports pairs whose
    case-insensitive similarity lies in (0.85, 1.0) — similar but not
    identical, since identical titles were already reported above.

    Args:
        df: paper table with at least 'id', 'title' and 'authors' columns.

    Returns:
        list of tuples, one per similar-title pair:
        (id1, id2, title1, title2, title_similarity, authors1, authors2,
         author_similarity, row1, row2) where row1/row2 are the full
        DataFrame rows (pd.Series) for downstream filtering.
    """
    # First check for exact duplicates
    duplicate_titles = df[df.duplicated('title', keep=False)]
    if not duplicate_titles.empty:
        print("\n[!] Duplicate titles found:")
        print(duplicate_titles[['id', 'title']].to_string(index=False))
    else:
        print("\n[✓] No duplicate titles found.")

    # Convert to plain lists so positional i/j indexing also works when the
    # DataFrame has a non-default index (Series[int] is label-based and
    # would raise KeyError there).
    titles = df['title'].fillna('').astype(str).tolist()
    authors = df['authors'].fillna('').astype(str).tolist()

    similar_pairs = []
    n = len(df)
    for i in range(n):
        for j in range(i + 1, n):
            # Skip if either title is empty
            if not titles[i] or not titles[j]:
                continue

            title_similarity = SequenceMatcher(None, titles[i].lower(), titles[j].lower()).ratio()
            if 0.85 < title_similarity < 1.0:  # similar but not an exact match
                # Calculate author similarity only when both author fields exist
                author_similarity = 0.0
                if authors[i] and authors[j]:
                    author_similarity = SequenceMatcher(None, authors[i].lower(), authors[j].lower()).ratio()

                row_i, row_j = df.iloc[i], df.iloc[j]
                similar_pairs.append((
                    row_i['id'],
                    row_j['id'],
                    row_i['title'],
                    row_j['title'],
                    title_similarity,
                    row_i['authors'],
                    row_j['authors'],
                    author_similarity,
                    row_i,  # Full paper info
                    row_j   # Full paper info
                ))

    if similar_pairs:
        print("\n[!] Similar titles found:")
        for pair in similar_pairs:
            print(f"\nPapers {pair[0]} and {pair[1]}")
            print(f"Title similarity: {pair[4]:.2f}")
            print(f"Title 1: {pair[2]}")
            print(f"Title 2: {pair[3]}")

            # Guard with isinstance: a missing author field is NaN, which is
            # truthy, so the original `if pair[5] and pair[6]` printed "nan".
            if isinstance(pair[5], str) and pair[5] and isinstance(pair[6], str) and pair[6]:
                print(f"Author similarity: {pair[7]:.2f}")
                print(f"Authors 1: {pair[5]}")
                print(f"Authors 2: {pair[6]}")

            # Print full paper information
            print("\nPaper 1 full information:")
            for col in pair[8].index:
                print(f"{col}: {pair[8][col]}")

            print("\nPaper 2 full information:")
            for col in pair[9].index:
                print(f"{col}: {pair[9][col]}")

            print("-" * 80)
    else:
        print("\n[✓] No similar titles found.")

    return similar_pairs

def check_duplicate_urls(df):
    """Check for duplicate website URLs.

    Rows with a missing website_url are excluded first: `duplicated`
    treats NaN values as equal to each other, so without the filter every
    paper lacking a URL would be reported as a duplicate of the others.

    Args:
        df: paper table with 'id', 'title' and 'website_url' columns.

    Returns:
        pd.DataFrame: the rows that share a website URL (possibly empty).
    """
    with_url = df[df['website_url'].notna()]
    duplicate_website = with_url[with_url.duplicated('website_url', keep=False)]
    if not duplicate_website.empty:
        print("\n[!] Duplicate website URLs found:")
        print(duplicate_website[['id', 'title', 'website_url']].to_string(index=False))
    else:
        print("\n[✓] No duplicate website URLs found.")
    return duplicate_website

def check_similar_abstracts(df, similarity_threshold=0.85, show_abstracts=False):
    """Check for similar abstracts using fuzzy string matching.

    Args:
        df: paper table with 'id', 'title' and 'abstract' columns.
        similarity_threshold: minimum SequenceMatcher ratio to report.
        show_abstracts: if True, also print both abstracts of each match.

    Returns:
        list of tuples (id1, id2, title1, title2, similarity,
        abstract1, abstract2) for every pair above the threshold.
    """
    # Plain list keeps positional i/j indexing valid for any DataFrame index
    # (Series[int] is label-based and would break on a non-default index).
    abstracts = df['abstract'].fillna('').astype(str).tolist()

    similar_pairs = []
    n = len(df)
    for i in range(n):
        for j in range(i + 1, n):
            # Skip if either abstract is empty
            if not abstracts[i] or not abstracts[j]:
                continue

            similarity = SequenceMatcher(None, abstracts[i], abstracts[j]).ratio()
            if similarity > similarity_threshold:
                row_i, row_j = df.iloc[i], df.iloc[j]
                similar_pairs.append((
                    row_i['id'],
                    row_j['id'],
                    row_i['title'],
                    row_j['title'],
                    similarity,
                    abstracts[i],
                    abstracts[j]
                ))

    if similar_pairs:
        print("\n[!] Similar abstracts found (threshold = {}):".format(similarity_threshold))
        for pair in similar_pairs:
            print(f"\nPapers {pair[0]} and {pair[1]} (similarity: {pair[4]:.2f})")
            print(f"Title 1: {pair[2]}")
            print(f"Title 2: {pair[3]}")

            if show_abstracts:
                print("\nAbstract 1:")
                print(pair[5])
                print("\nAbstract 2:")
                print(pair[6])
                print("-" * 80)
    else:
        print("\n[✓] No similar abstracts found (threshold = {}).".format(similarity_threshold))

    return similar_pairs

# All indicators lowercase: they are matched against .lower()-ed text. The
# original mixed-case entries ('arXiv preprint', 'bioRxiv') could never match.
_PREPRINT_INDICATORS = ['arxiv.org', 'chemrxiv.org', 'arxiv preprint', 'biorxiv']


def _is_preprint(paper):
    """Return True if the paper's URL or journal field points at a preprint server."""
    for field in ('website_url', 'journal'):
        value = paper[field]
        if isinstance(value, str) and any(ind in value.lower() for ind in _PREPRINT_INDICATORS):
            return True
    return False


def _citation_count(paper):
    """Best-effort integer citation count; a missing value counts as 0."""
    return int(paper['citations']) if pd.notna(paper['citations']) else 0


def filter_redundant_papers(df, similar_pairs):
    """Filter out redundant papers, preferring non-preprint versions.

    For each high-similarity pair: if exactly one paper looks like a
    preprint it is removed; if both do, the tie is broken by citation
    count, then by presence of a journal name; otherwise the pair is
    queued for manual review.

    Args:
        df: full paper table with 'id', 'website_url', 'journal' and
            'citations' columns.
        similar_pairs: tuples as produced by the title-similarity check;
            index 4 is the title similarity, indexes 8/9 the full rows.

    Returns:
        (filtered_df, undecided_pairs, papers_to_remove, manual_review_pairs)
    """
    papers_to_remove = set()
    undecided_pairs = []      # both preprints, no tie-breaker decided
    manual_review_pairs = []  # neither paper looks like a preprint

    for pair in similar_pairs:
        # Only consider pairs with high title similarity
        if pair[4] <= 0.85:
            continue
        paper1, paper2 = pair[8], pair[9]  # Full paper info

        paper1_preprint = _is_preprint(paper1)
        paper2_preprint = _is_preprint(paper2)

        # Decision logic
        if paper1_preprint and not paper2_preprint:
            papers_to_remove.add(paper1['id'])
        elif paper2_preprint and not paper1_preprint:
            papers_to_remove.add(paper2['id'])
        elif paper1_preprint and paper2_preprint:
            # If both are preprints, keep the one with more citations
            citations1 = _citation_count(paper1)
            citations2 = _citation_count(paper2)
            if citations1 > citations2:
                papers_to_remove.add(paper2['id'])
            elif citations2 > citations1:
                papers_to_remove.add(paper1['id'])
            # If same citations, prefer the one with a journal name
            elif pd.notna(paper1['journal']) and pd.isna(paper2['journal']):
                papers_to_remove.add(paper2['id'])
            elif pd.isna(paper1['journal']) and pd.notna(paper2['journal']):
                papers_to_remove.add(paper1['id'])
            else:
                undecided_pairs.append((paper1, paper2))
        else:
            # Neither is clearly a preprint; send to manual review
            manual_review_pairs.append((paper1, paper2))

    # Drop the redundant papers from the table
    filtered_df = df[~df['id'].isin(papers_to_remove)]

    return filtered_df, undecided_pairs, papers_to_remove, manual_review_pairs

def main():
    """Entry point: load the CSV, run all redundancy checks, report the
    removed/undecided/manual-review pairs, and save the filtered dataset."""
    # Set up argument parser
    parser = argparse.ArgumentParser(description='Check for redundant papers in paper_info.csv')
    parser.add_argument('--abstract', action='store_true', help='Show similar abstracts check results')
    parser.add_argument('--file', type=str, default='data/paper_info.csv', help='Path to the CSV file to process (default: data/paper_info.csv)')
    parser.add_argument('--output', type=str, help='Path to save the filtered CSV file (default: input_filtered.csv)')
    args = parser.parse_args()

    # Create data directory if it doesn't exist and using default path
    if args.file == 'data/paper_info.csv':
        Path('data').mkdir(exist_ok=True)

    # Default output filename: <input stem>_filtered.csv
    if not args.output:
        args.output = f"{Path(args.file).stem}_filtered.csv"

    # Load the CSV file
    try:
        df = pd.read_csv(args.file)
    except FileNotFoundError:
        print(f"[ERROR] '{args.file}' not found.")
        return
    except Exception as e:
        print(f"[ERROR] Failed to load CSV: {str(e)}")
        return

    print(f"\n=== Checking for Redundant Papers in {args.file} ===")

    def _print_fields(paper):
        # Detail lines shared by all three report sections below.
        print(f"Title: {paper['title']}")
        print(f"Authors: {paper['authors']}")
        print(f"URL: {paper['website_url']}")
        print(f"Journal: {paper['journal']}")
        print(f"Citations: {paper['citations']}")

    def _print_pair(paper1, paper2):
        # One "Pair:" entry for the manual-review / undecided sections.
        print("\nPair:")
        print(f"Paper 1: {paper1['id']}")
        _print_fields(paper1)
        print(f"\nPaper 2: {paper2['id']}")
        _print_fields(paper2)
        print("-" * 80)

    # Run all checks
    check_duplicate_ids(df)

    # Find near-duplicate title pairs (same criteria as check_duplicate_titles,
    # recomputed here because the pair tuples are needed for filtering).
    similar_pairs = []
    titles = df['title'].fillna('').astype(str).tolist()
    authors = df['authors'].fillna('').astype(str).tolist()
    n = len(df)
    for i in range(n):
        for j in range(i + 1, n):
            if not titles[i] or not titles[j]:
                continue

            title_similarity = SequenceMatcher(None, titles[i].lower(), titles[j].lower()).ratio()
            if 0.85 < title_similarity < 1.0:
                author_similarity = 0.0
                if authors[i] and authors[j]:
                    author_similarity = SequenceMatcher(None, authors[i].lower(), authors[j].lower()).ratio()

                similar_pairs.append((
                    df.iloc[i]['id'],
                    df.iloc[j]['id'],
                    df.iloc[i]['title'],
                    df.iloc[j]['title'],
                    title_similarity,
                    df.iloc[i]['authors'],
                    df.iloc[j]['authors'],
                    author_similarity,
                    df.iloc[i],
                    df.iloc[j]
                ))

    # Filter redundant papers
    filtered_df, undecided_pairs, removed_ids, manual_review_pairs = filter_redundant_papers(df, similar_pairs)

    # Print filtering results
    if removed_ids:
        print("\n[!] Removed the following redundant papers and their corresponding versions:")
        for pair in similar_pairs:
            paper1, paper2 = pair[8], pair[9]  # Full paper info
            if paper1['id'] in removed_ids or paper2['id'] in removed_ids:
                # The removed paper of the pair is labeled as the preprint;
                # the kept one as the journal version.
                if paper1['id'] in removed_ids:
                    preprint, journal = paper1, paper2
                else:
                    preprint, journal = paper2, paper1

                print("\nRemoved preprint version:")
                print(f"ID: {preprint['id']}")
                _print_fields(preprint)

                print("\nKept journal version:")
                print(f"ID: {journal['id']}")
                _print_fields(journal)
                print("-" * 80)

    if manual_review_pairs:
        print("\n[!] The following pairs need manual review (no clear preprint/journal distinction):")
        for paper1, paper2 in manual_review_pairs:
            _print_pair(paper1, paper2)

    if undecided_pairs:
        print("\n[!] Could not automatically decide between the following preprint pairs:")
        for paper1, paper2 in undecided_pairs:
            _print_pair(paper1, paper2)

    # Save filtered dataset and print a summary
    filtered_df.to_csv(args.output, index=False)
    print(f"\n[✓] Filtered dataset saved to {args.output}")
    print(f"Original papers: {len(df)}")
    print(f"Papers after filtering: {len(filtered_df)}")
    print(f"Removed papers: {len(df) - len(filtered_df)}")
    print(f"Pairs needing manual review: {len(manual_review_pairs)}")

    check_duplicate_urls(filtered_df)
    if args.abstract:
        check_similar_abstracts(filtered_df, show_abstracts=args.abstract)

    print("\n=== Redundancy Check Complete ===")

# Run the redundancy checks only when executed as a script, so the module
# can be imported (e.g. for its check_* helpers) without side effects.
if __name__ == "__main__":
    main()