from flask import Flask, request, jsonify, render_template
from app.services.crawler import WeChatCrawler
from app.services.pdf_generator import PDFService
from app.models import db, Article
import logging

app = Flask(__name__)
# Load configuration from the top-level config module (object-based config).
app.config.from_object('config')

# Configure root logging: INFO level with timestamp/logger/level in each record.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

@app.route('/')
def index():
    """Render and return the application's home page."""
    # Template is resolved from the app's templates/ directory.
    return render_template('index.html')

@app.route('/api/crawl', methods=['POST'])
def crawl_articles():
    """Crawl WeChat articles and persist any that are new.

    Expects form fields ``token``, ``cookie`` and ``fakeid``.  Crawls the
    article list, fetches the full content of articles not already stored
    (deduplicated on ``content_url``), commits them in one transaction and
    returns a JSON summary.

    Returns:
        200 with a message and the saved articles' metadata on success,
        400 if any required form field is missing,
        500 on any crawl or database failure.
    """
    try:
        # Required request parameters.
        token = request.form.get('token')
        cookie = request.form.get('cookie')
        fakeid = request.form.get('fakeid')

        # Log the request; truncate the token so the full credential never
        # reaches the log files.
        app.logger.info('采集文章请求参数: token=%s, fakeid=%s',
                       token[:10] + '...' if token else None,
                       fakeid)

        # Validate that all required parameters are present.
        if not all([token, cookie, fakeid]):
            return jsonify({'error': '参数不完整'}), 400

        # Run the crawler to get the article list.
        crawler = WeChatCrawler(token, cookie, fakeid)
        articles = crawler.crawl_articles()

        # Persist only articles we have not seen before.
        saved_articles = []
        for article in articles:
            # Dedup on content_url: skip articles already in the database.
            existing = Article.query.filter_by(content_url=article['content_url']).first()
            if existing:
                continue
            # Fetch the full article body; skip this article if it fails.
            content = crawler.get_article_content(article['content_url'])
            if not content:
                continue
            new_article = Article(
                title=article['title'],
                digest=article['digest'],
                content_url=article['content_url'],
                content=content,
                cover=article['cover'],
                create_time=article['create_time'],
                update_time=article['update_time'],
                author=article['author'],
                copyright_stat=article['copyright_stat']
            )
            db.session.add(new_article)
            saved_articles.append(new_article)

        # Single commit so the whole crawl is one transaction.
        db.session.commit()

        app.logger.info('采集文章成功: 获取到 %d 篇文章，保存 %d 篇新文章',
                       len(articles), len(saved_articles))

        return jsonify({
            'message': f'成功采集 {len(articles)} 篇文章，保存 {len(saved_articles)} 篇新文章',
            'articles': [{
                'id': article.id,
                'title': article.title,
                'create_time': article.create_time.isoformat(),
                'pdf_path': article.pdf_path
            } for article in saved_articles]
        })
    except Exception as e:
        # Fix: roll back so a failed transaction does not poison the session
        # for subsequent requests (the original left the session dirty).
        db.session.rollback()
        # Fix: logger.exception records the full traceback, not just str(e).
        app.logger.exception('采集文章失败: %s', str(e))
        return jsonify({'error': str(e)}), 500