import atexit
import logging

from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

from config import Config

# Shared SQLAlchemy handle; bound to the application in create_app() via init_app().
db = SQLAlchemy()

def create_app(config_class=Config):
    """Application factory: build the Flask app, initialize the database,
    register routes, and start the background scraping scheduler.

    Args:
        config_class: Configuration object loaded via ``app.config.from_object``;
            must define ``CATEGORIES``, a mapping of category name -> URL.

    Returns:
        The fully configured :class:`flask.Flask` application.
    """
    app = Flask(__name__)

    # Force the log level to INFO so CLI commands also emit log output.
    app.logger.setLevel(logging.INFO)

    app.config.from_object(config_class)

    db.init_app(app)

    with app.app_context():
        # Import models so their tables are registered before create_all().
        from . import models  # noqa: F401
        db.create_all()

    from .routes import bp as main_bp
    app.register_blueprint(main_bp)

    # Deferred import of the scraper to avoid a circular dependency.
    from .scraper import scrape_by_category

    # Categories to scrape, taken from configuration.
    categories_to_scrape = app.config['CATEGORIES']

    scheduler = BackgroundScheduler()

    def create_job(category, url):
        """Return a job callable that runs the scraper inside an app context.

        The ``with app.app_context():`` form guarantees the context is popped
        after every run; the previous ``push()``-only lambda leaked one
        application context per job execution.
        """
        def job():
            with app.app_context():
                scrape_by_category(category, url)
        return job

    for category, url in categories_to_scrape.items():
        job_id = f'oschina_scraper_{category}'
        scheduler.add_job(
            func=create_job(category, url),
            trigger="interval",
            hours=1,
            id=job_id,
            replace_existing=True,
        )

    # NOTE(review): under the Werkzeug debug reloader create_app() runs twice,
    # which would start two schedulers — confirm if the app runs in debug mode.
    scheduler.start()
    # Stop the scheduler's worker threads cleanly at interpreter exit.
    atexit.register(lambda: scheduler.shutdown(wait=False))

    return app
