import sys
import os
# Resolve the project root (the parent of this file's directory) and append it
# to sys.path so the local packages (app/, crawlers/, visualization/) can be
# imported when this script is run directly.
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_root)

import threading
import tkinter as tk
from datetime import datetime
from app.app_gui import InfoAnalyzerApp
from crawlers.international import InternationalNewsCrawler
from crawlers.national import NationalNewsCrawler
from visualization.wordcloud_generator import WordCloudGenerator

def crawl_international():
    """Worker-thread body: crawl today's Sina international news.

    Catches and reports any crawl error instead of letting the exception
    kill the thread with an unhandled traceback — consistent with the
    error handling in crawl_national().
    """
    print(f"\n开始爬取 {datetime.now().strftime('%Y-%m-%d')} 的新浪国际新闻...")
    international_crawler = InternationalNewsCrawler()
    try:
        international_crawler.crawl_news_with_bs4()
    except Exception as e:
        print(f"国际新闻爬取过程中发生错误: {str(e)}")

def crawl_national():
    """Worker-thread body: crawl and persist today's Sina national news.

    Any error during the crawl/save is caught and reported so the thread
    exits cleanly instead of dying with a traceback.
    """
    print("开始爬取新浪国内当天新闻...")
    crawler = NationalNewsCrawler()
    try:
        articles = crawler.crawl_today_news(max_pages=5)
        print(f"共获取到 {len(articles)} 条当天新闻")
        crawler.save_news_data(articles)
    except Exception as e:
        print(f"国内新闻爬取过程中发生错误: {str(e)}")

def generate_wordclouds():
    """Render word-cloud images from today's CSV files.

    Must run after both crawler threads have finished, since it reads the
    CSV files they produce under data/csv/.
    """
    today = datetime.now().date()
    generator = WordCloudGenerator()
    stopwords = generator.load_stopwords()

    # (input CSV, output image, progress banner) for each news category.
    jobs = [
        (f"data/csv/{today}national_news.csv",
         "national_wordcloud.png", "\n正在生成国内新闻词云..."),
        (f"data/csv/{today}international_news.csv",
         "international_wordcloud.png", "\n正在生成国际新闻词云..."),
    ]
    for csv_path, image_name, banner in jobs:
        print(banner)
        generator.generate_wordcloud_from_csv(csv_path, image_name, stopwords)

if __name__ == "__main__":
    
    # 创建并启动爬虫线程
    t1 = threading.Thread(target=crawl_international)
    t2 = threading.Thread(target=crawl_national)
    
    t1.start()
    t2.start()

    # 等待爬虫线程完成
    t1.join()
    t2.join()

    # 生成词云（单线程，依赖爬虫结果）
    generate_wordclouds()

    # 启动GUI
    root = tk.Tk()
    app = InfoAnalyzerApp(root)
    root.mainloop()