#!/usr/bin/env python3
"""
API interaction functions for SERP API and Google Scholar.
"""

import time
import requests
from .network import create_session_with_retries

def search_google_scholar(keyword, api_key, num_results=80):
    """Search Google Scholar via the SERP API, paginating until enough results.

    Args:
        keyword: Query string to search for.
        api_key: SERP API key.
        num_results: Maximum number of organic results to collect.

    Returns:
        A list of raw "organic_results" dicts, at most ``num_results`` long.
        On a request error the results collected so far are returned.
    """
    base_url = "https://serpapi.com/search"
    results = []
    start = 0

    # Google Scholar typically returns 10 results per page.
    results_per_page = 10

    # Ceiling division: number of pages needed to reach num_results.
    num_pages = (num_results + results_per_page - 1) // results_per_page

    # Build the retrying session ONCE, not once per page — reuses the
    # connection pool and avoids rebuilding the retry adapter each loop.
    session = create_session_with_retries()

    for _ in range(num_pages):
        params = {
            "engine": "google_scholar",
            "q": keyword,
            "api_key": api_key,
            "start": start,
            "num": min(results_per_page, num_results - len(results)),
        }

        try:
            # Timeout so a hung connection cannot block the caller forever.
            response = session.get(base_url, params=params, timeout=30)
            response.raise_for_status()
            data = response.json()
        except requests.exceptions.RequestException as e:
            print(f"Error during API request: {e}")
            break

        if not data.get("organic_results"):
            # No more results available — stop paginating.
            break

        results.extend(data["organic_results"])

        # Stop once we've reached our target number of results.
        if len(results) >= num_results:
            break

        # Advance pagination and be kind to the API with a small delay.
        start += results_per_page
        time.sleep(1)

    return results[:num_results]  # Ensure we don't return more than requested

def process_search_result(result):
    """Flatten one SERP API Google Scholar organic result into a paper dict.

    Args:
        result: A single entry from the API's "organic_results" list.

    Returns:
        Dict with normalized fields: id, title, website_url, download_url,
        abstract, doi, citations, authors, publication_date, journal,
        citation_link, author_year, original_url, plus (when present in the
        input) publication_info, citation_id and serpapi_citation_link.
    """
    # Local imports kept function-scoped to match the original layout;
    # grouped here instead of scattered through the body.
    import re
    from .web_utils import extract_actual_url_from_wayback

    paper = {
        'id': result.get('result_id', ''),
        'title': result.get('title', 'Unknown Title'),
        'website_url': '',
        'download_url': '',
        'abstract': result.get('snippet', ''),
        'doi': None,
        'citations': 0,
        'authors': '',
        'publication_date': '',
        'journal': '',
        'citation_link': '',
        'author_year': ''  # "First Author, YYYY" convenience field
    }

    # Clean the website URL (strip Wayback Machine wrappers).
    original_url = result.get('link', '')
    paper['original_url'] = original_url
    paper['website_url'] = extract_actual_url_from_wayback(original_url)

    # Collect deduplicated, cleaned PDF links from the resources list.
    pdf_urls = []
    for resource in result.get('resources', []):
        if resource.get('file_format', '').lower() == 'pdf':
            cleaned_url = extract_actual_url_from_wayback(resource.get('link', ''))
            if cleaned_url and cleaned_url not in pdf_urls:
                pdf_urls.append(cleaned_url)
    if pdf_urls:
        paper['download_url'] = ','.join(pdf_urls)

    # Publication summary typically looks like
    # "Authors - Journal, Year - Publisher".
    if 'publication_info' in result:
        pub_info = result.get('publication_info', {})
        summary = pub_info.get('summary', '')
        paper['publication_info'] = summary

        if summary:
            parts = summary.split(' - ')
            if len(parts) >= 2:
                paper['authors'] = parts[0]
                journal_parts = parts[1].split(', ')
                # BUGFIX: the journal was previously recorded only when a
                # date component was also present (len >= 2), silently
                # dropping it for summaries like "Author - Nature - Pub".
                # str.split always yields at least one element, so record
                # the journal unconditionally; the dead try/except
                # IndexError around the date (it could never fire after
                # the length check) is removed.
                paper['journal'] = journal_parts[0]
                if len(journal_parts) >= 2:
                    paper['publication_date'] = journal_parts[1]

    # Citation count and links from inline_links.cited_by.
    cited_by = result.get('inline_links', {}).get('cited_by', {})
    if cited_by:
        paper['citations'] = cited_by.get('total', 0)
        paper['citation_link'] = cited_by.get('link', '')
        paper['citation_id'] = cited_by.get('cites_id', '')
        # SerpApi citation link kept for potential further scraping.
        if 'serpapi_scholar_link' in cited_by:
            paper['serpapi_citation_link'] = cited_by.get('serpapi_scholar_link', '')

    # Build "First Author, YYYY" from whatever pieces are available.
    first_author = ''
    if paper['authors']:
        # Authors are comma-separated; take the first one.
        first_author = paper['authors'].split(',')[0].strip()

    pub_year = ''
    if paper['publication_date']:
        # Extract a plausible 4-digit year (1900-2099).
        year_match = re.search(r'\b(19|20)\d{2}\b', paper['publication_date'])
        if year_match:
            pub_year = year_match.group(0)

    if first_author and pub_year:
        paper['author_year'] = f"{first_author}, {pub_year}"
    elif first_author:
        paper['author_year'] = first_author
    elif pub_year:
        paper['author_year'] = pub_year

    return paper