#!/usr/bin/env python
import os
import sys
import time
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from finance_scraper.spiders.finance_ministry import FinanceMinistrySpider
import importlib.util
import importlib.machinery

def main():
    """Run the FinanceMinistrySpider to completion, then generate and open the visualization.

    Side effects: starts a (blocking) Scrapy crawl, executes the sibling
    ``visualization.py`` module, and opens the produced HTML file in the
    default web browser. Prints progress/diagnostic messages to stdout.
    """
    # Configure and launch the crawl.
    settings = get_project_settings()
    process = CrawlerProcess(settings)
    process.crawl(FinanceMinistrySpider)
    print("Starting finance data crawler...")
    process.start()  # This will block until the crawling is finished

    print("Crawling finished. Generating visualization...")

    # Brief pause so pipeline output files are flushed/closed before we read them.
    time.sleep(1)

    # Locate visualization.py next to this script. abspath() guards against
    # os.path.dirname(__file__) being '' when run as `python run.py`.
    vis_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'visualization.py')
    if os.path.exists(vis_path):
        # Dynamically import the module from its file path.
        spec = importlib.util.spec_from_file_location("visualization", vis_path)
        if spec is None or spec.loader is None:
            # spec_from_file_location can return None (or a loader-less spec);
            # fail with a clear message instead of an AttributeError.
            print(f"Could not load visualization module from {vis_path}")
            return
        vis_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(vis_module)

        # Generate visualization
        visualizer = vis_module.FinanceDataVisualizer()
        visualizer.generate_visualization()

        # Open the HTML file in the default browser. The visualizer is assumed
        # to write into the current working directory — TODO confirm.
        html_path = os.path.join(os.getcwd(), 'finance_visualization.html')
        if os.path.exists(html_path):
            import webbrowser
            from pathlib import Path
            # Path.as_uri() builds a valid file:// URI on every platform
            # ('file://' + path breaks on Windows backslash paths).
            webbrowser.open(Path(html_path).as_uri())
            print(f"Visualization opened in browser: {html_path}")
    else:
        print(f"Visualization module not found at {vis_path}")

# Run the crawl-then-visualize pipeline only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main() 