import datetime
import os
import time
import traceback

from config.settings import BASE_DIR, logger
from spiders.bt_news_spider import BTNewsSpider
from spiders.meiye_news_spider import MeiyeNewsSpider
from spiders.national_news_spider import NationalNewsSpider
from spiders.oil_news_spider import OilNewsSpider


def _crawl_category(spider_factory, start_label, result_label):
    """Crawl one news category, logging (but never propagating) failures.

    A failure in one category must not stop the remaining categories, so
    every exception is caught, logged, and its traceback printed to stderr
    (same behavior as the original inline try/except blocks).

    Args:
        spider_factory: Zero-argument callable returning a spider instance
            that exposes a ``crawl()`` method.
        start_label: Category name used in the "start crawling" log message.
        result_label: Category name used in the completion/error messages.
            NOTE(review): kept separate from ``start_label`` because the
            national category originally logged "国内国际新闻" on start but
            "国内/国际新闻" on completion/error — reproduced exactly.
    """
    try:
        logger.info(f"开始爬取{start_label}...")
        spider = spider_factory()
        spider.crawl()  # 只爬取，不总结
        logger.info(f"{result_label}获取完成")
    except Exception as e:
        logger.error(f"获取{result_label}时出错: {str(e)}")
        traceback.print_exc()


def main():
    """Program entry point: prepare output folders and today's raw-content
    file, then crawl all four news sources in sequence.

    Each category is crawled independently via ``_crawl_category``; sleeps
    between crawls throttle the request rate against the target sites.
    """
    logger.info("开始生成班超要闻")
    today = datetime.date.today()

    # Create the category folders on first run.
    for folder in ('yuanshineirong', 'zongjie', 'doc'):
        folder_path = os.path.join(BASE_DIR, folder)
        if not os.path.exists(folder_path):
            # exist_ok=True closes the check-then-create race if another
            # process creates the folder between exists() and makedirs().
            os.makedirs(folder_path, exist_ok=True)
            logger.info(f"创建文件夹: {folder_path}")

    # Create (truncating if present) today's raw-content file with a header.
    today_date = today.strftime("%Y%m%d")
    raw_filename = os.path.join(BASE_DIR, "yuanshineirong", f"原始内容_{today_date}.txt")
    with open(raw_filename, 'w', encoding='utf-8') as raw_file:
        raw_file.write(f"原始内容 - {today_date}\n")
        raw_file.write("=" * 50 + "\n\n")

    # 爬取国内国际新闻
    _crawl_category(NationalNewsSpider, "国内国际新闻", "国内/国际新闻")

    time.sleep(20)  # 增加爬虫间隔时间，避免请求过快

    # 爬取兵团新闻
    _crawl_category(BTNewsSpider, "兵团新闻", "兵团新闻")

    # 爬取镁业新闻
    # NOTE(review): the original had no delay between the BT and Meiye
    # crawls — preserved as-is; confirm whether a sleep was intended here.
    _crawl_category(MeiyeNewsSpider, "镁业新闻", "镁业新闻")

    # 爬取国际原油信息
    time.sleep(20)
    _crawl_category(OilNewsSpider, "国际原油信息", "国际原油信息")

    logger.info("所有原始内容已保存完成")


if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is a normal way to stop the script — log it, no traceback.
        logger.info("用户中断程序执行")
    except Exception as e:
        # Last-resort boundary handler: log the failure and dump the full
        # traceback to stderr. Uses the module-level `traceback` import
        # instead of a redundant local `import traceback`.
        logger.error(f"程序执行出错: {str(e)}")
        traceback.print_exc()