#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
微信公众号文章爬虫 RESTful API 服务
提供HTTP接口，通过链接获取文章标题和正文内容
"""

from flask import Flask, request, jsonify, send_file
from flask_cors import CORS
import json
import time
import traceback
import os
import hashlib
from datetime import datetime
from wechat_crawler import WeChatArticleCrawler
import logging

# Logging configuration: timestamped INFO-level records for every module
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Create the Flask application
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from browser clients

# Global crawler instance, reused across requests (see get_crawler())
crawler = None

def get_crawler():
    """Return the shared crawler, creating it lazily on first use (singleton)."""
    global crawler
    if crawler is not None:
        return crawler
    crawler = WeChatArticleCrawler(use_selenium=False)
    return crawler

def ensure_files_directory():
    """Ensure the file storage directory exists and return its path.

    Returns:
        str: absolute path of the ``api_files`` directory under the
        current working directory.
    """
    files_dir = os.path.join(os.getcwd(), 'api_files')
    # exist_ok=True makes the check-and-create atomic, avoiding the TOCTOU
    # race of exists()-then-makedirs() when requests arrive concurrently.
    os.makedirs(files_dir, exist_ok=True)
    return files_dir

def generate_file_id(title, url):
    """Derive a short (12 hex chars) id from title, url and the current time.

    The timestamp salt makes repeated saves of the same article produce
    distinct filenames.
    """
    seed = f"{title}_{url}_{datetime.now().isoformat()}"
    digest = hashlib.md5(seed.encode('utf-8')).hexdigest()
    return digest[:12]

def save_content_to_file(article_info, content, format_type):
    """Persist formatted article content to disk and return file metadata.

    Args:
        article_info: dict with at least 'title', 'author', 'publish_time'
            and 'url' keys describing the crawled article.
        content: already-formatted article body (plain text, HTML fragment,
            or Markdown, matching ``format_type``).
        format_type: 'text' | 'html' | 'markdown'; any other value falls
            back to plain text with a .txt extension.

    Returns:
        dict with keys 'url' (API path to fetch the file), 'path'
        (filename), 'size' (bytes on disk) and 'format'.

    Raises:
        Exception: wraps any underlying I/O error, preserving the existing
            Chinese-language error contract of the API.
    """
    try:
        files_dir = ensure_files_directory()

        # Build a filesystem-safe name: keep alphanumerics plus space/-/_,
        # then cap at 50 chars so long titles cannot blow past path limits.
        title = article_info['title']
        safe_title = "".join(c for c in title if c.isalnum() or c in (' ', '-', '_')).rstrip()
        safe_title = safe_title[:50]

        file_id = generate_file_id(title, article_info['url'])

        # Map format to file extension; unknown formats default to .txt
        extensions = {'text': 'txt', 'html': 'html', 'markdown': 'md'}
        ext = extensions.get(format_type, 'txt')

        filename = f"{safe_title}_{file_id}.{ext}"
        filepath = os.path.join(files_dir, filename)

        # Write the file in the requested format
        with open(filepath, 'w', encoding='utf-8') as f:
            if format_type == 'html':
                # Wrap the HTML fragment in a complete standalone document
                html_content = f"""<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>{article_info['title']}</title>
    <style>
        body {{ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; 
               line-height: 1.6; max-width: 800px; margin: 0 auto; padding: 20px; }}
        .article-meta {{ color: #666; margin-bottom: 20px; border-bottom: 1px solid #eee; padding-bottom: 10px; }}
        .article-content {{ margin-top: 20px; }}
    </style>
</head>
<body>
    <div class="article-meta">
        <h1>{article_info['title']}</h1>
        <p>作者: {article_info['author']}</p>
        <p>发布时间: {article_info['publish_time']}</p>
        <p>原文链接: <a href="{article_info['url']}" target="_blank">{article_info['url']}</a></p>
    </div>
    <div class="article-content">
        {content}
    </div>
</body>
</html>"""
                f.write(html_content)
            elif format_type == 'markdown':
                # Prepend article metadata as a Markdown header block
                md_content = f"""# {article_info['title']}

**作者**: {article_info['author']}  
**发布时间**: {article_info['publish_time']}  
**原文链接**: [{article_info['url']}]({article_info['url']})

---

{content}
"""
                f.write(md_content)
            else:
                # Plain-text format with a metadata preamble
                text_content = f"""{article_info['title']}

作者: {article_info['author']}
发布时间: {article_info['publish_time']}
原文链接: {article_info['url']}

{'-' * 50}

{content}
"""
                f.write(text_content)

        file_size = os.path.getsize(filepath)

        # BUG FIX: this URL previously contained the literal placeholder
        # "(unknown)" instead of the saved filename, so clients could never
        # retrieve the file. Point it at the /api/files/<filename> endpoint.
        file_url = f"/api/files/{filename}"

        return {
            'url': file_url,
            'path': filename,
            'size': file_size,
            'format': format_type
        }

    except Exception as e:
        logger.error(f"保存文件时发生错误: {str(e)}")
        raise Exception(f"文件保存失败: {str(e)}")

@app.route('/', methods=['GET'])
def index():
    """API landing page: describes the service and how to call it."""
    info = {
        'service': '微信公众号文章爬虫API',
        'version': '1.0.0',
        'endpoints': {
            'GET /': '服务信息',
            'POST /api/crawl': '爬取文章内容',
            'GET /api/health': '健康检查'
        },
        'usage': {
            'url': '/api/crawl',
            'method': 'POST',
            'body': {
                'url': '微信公众号文章链接',
                'format': 'text|html|markdown (可选，默认text)'
            }
        }
    }
    return jsonify(info)

@app.route('/api/health', methods=['GET'])
def health_check():
    """Liveness probe: always reports healthy, with a server timestamp."""
    payload = {
        'status': 'healthy',
        'timestamp': time.strftime('%Y-%m-%d %H:%M:%S'),
        'service': 'wechat-article-crawler-api'
    }
    return jsonify(payload)

@app.route('/api/crawl', methods=['POST'])
def crawl_article():
    """
    Crawl a single WeChat official-account article.

    Request body (JSON):
        url (str, required): link to the WeChat article.
        format (str, optional): "text" | "html" | "markdown"; default "text".
        response_type (str, optional): "content" (return content inline,
            default), "file" (save to disk, return a download link), or
            "both".

    Success response (HTTP 200):
        {"success": true,
         "data": {title, author, publish_time, url, content_type,
                  content_length, plus — depending on response_type —
                  content and/or file_url / file_path / file_size},
         "message": "...", "timestamp": "..."}

    Error responses use {"success": false, "error": ..., "code": ...}
    with HTTP 400 for validation failures and 500 for extraction or
    internal errors. The error codes and Chinese messages below are part
    of the public API contract — do not change them casually.
    """
    try:
        # Reject non-JSON bodies up front with a 400
        if not request.is_json:
            return jsonify({
                'success': False,
                'error': 'Content-Type必须为application/json',
                'code': 'INVALID_CONTENT_TYPE'
            }), 400
        
        data = request.get_json()
        
        # 'url' is the only required parameter
        if not data or 'url' not in data:
            return jsonify({
                'success': False,
                'error': '缺少必需参数: url',
                'code': 'MISSING_URL'
            }), 400
        
        url = data['url'].strip()
        format_type = data.get('format', 'text').lower()
        response_type = data.get('response_type', 'content').lower()  # delivery mode: content | file | both
        
        # Empty (whitespace-only) URL is rejected separately from a missing one
        if not url:
            return jsonify({
                'success': False,
                'error': 'URL不能为空',
                'code': 'EMPTY_URL'
            }), 400
        
        # Delegate WeChat-URL validation to the crawler's private helper
        crawler_instance = get_crawler()
        if not crawler_instance._is_valid_wechat_url(url):
            return jsonify({
                'success': False,
                'error': '不是有效的微信公众号文章链接',
                'code': 'INVALID_WECHAT_URL'
            }), 400
        
        # Only the three supported output formats are accepted
        if format_type not in ['text', 'html', 'markdown']:
            return jsonify({
                'success': False,
                'error': 'format参数必须为: text, html, markdown',
                'code': 'INVALID_FORMAT'
            }), 400
        
        # Only the three supported delivery modes are accepted
        if response_type not in ['content', 'file', 'both']:
            return jsonify({
                'success': False,
                'error': 'response_type参数必须为: content(返回内容), file(返回文件链接), both(返回两者)',
                'code': 'INVALID_RESPONSE_TYPE'
            }), 400
        
        logger.info(f"开始爬取文章: {url}")
        
        # Fetch and parse the article; returns a dict or a falsy value on failure
        article_info = crawler_instance.extract_article_content(url)
        
        if not article_info:
            return jsonify({
                'success': False,
                'error': '无法提取文章内容，可能是链接无效或网络问题',
                'code': 'EXTRACTION_FAILED'
            }), 500
        
        # Pick the requested representation; 'content' is always bound here
        # because format_type was validated above.
        if format_type == 'text':
            content = article_info['text_content']
        elif format_type == 'html':
            content = article_info['html_content']
        elif format_type == 'markdown':
            # Lazy import: markdownify (third-party) is only needed for this branch
            from markdownify import markdownify as md
            content = md(article_info['html_content'])
        
        # Metadata common to every response_type
        response_data = {
            'title': article_info['title'],
            'author': article_info['author'],
            'publish_time': article_info['publish_time'],
            'url': article_info['url'],
            'content_type': format_type,
            'content_length': len(content)
        }
        
        # Attach inline content and/or saved-file info per response_type
        if response_type == 'content':
            # Return the content directly in the JSON body
            response_data['content'] = content
            
        elif response_type == 'file':
            # Persist to disk and return only the file link/metadata
            file_info = save_content_to_file(article_info, content, format_type)
            response_data['file_url'] = file_info['url']
            response_data['file_path'] = file_info['path']
            response_data['file_size'] = file_info['size']
            
        elif response_type == 'both':
            # Return the inline content AND the saved-file link/metadata
            response_data['content'] = content
            file_info = save_content_to_file(article_info, content, format_type)
            response_data['file_url'] = file_info['url']
            response_data['file_path'] = file_info['path']
            response_data['file_size'] = file_info['size']
        
        logger.info(f"成功爬取文章: {article_info['title']}")
        
        return jsonify({
            'success': True,
            'data': response_data,
            'message': '文章爬取成功',
            'timestamp': time.strftime('%Y-%m-%d %H:%M:%S')
        })
    
    except Exception as e:
        # Catch-all boundary: any unexpected failure (including a non-string
        # 'url' hitting .strip()) becomes a JSON 500 with the error message.
        logger.error(f"爬取文章时发生错误: {str(e)}")
        logger.error(traceback.format_exc())
        
        return jsonify({
            'success': False,
            'error': f'服务器内部错误: {str(e)}',
            'code': 'INTERNAL_ERROR'
        }), 500

@app.route('/api/batch-crawl', methods=['POST'])
def batch_crawl_articles():
    """
    Crawl multiple WeChat official-account articles in one request.

    Request body (JSON):
        urls (list[str], required): article links, at most 10 per call.
        format (str, optional): "text" | "html" | "markdown"; default "text".

    Response (HTTP 200 even when individual URLs fail):
        {"success": true,
         "data": {"total": N, "success_count": S, "failed_count": F,
                  "results": [{"url": ..., "success": true, "data": {...}}
                              or {"url": ..., "success": false, "error": ...}]},
         "message": "...", "timestamp": "..."}

    Validation failures return HTTP 400 and internal errors HTTP 500, both
    as {"success": false, "error": ..., "code": ...}. Per-URL failures do
    not abort the batch — they are recorded in `results` and processing
    continues with the next link.
    """
    try:
        # Reject non-JSON bodies up front with a 400
        if not request.is_json:
            return jsonify({
                'success': False,
                'error': 'Content-Type必须为application/json',
                'code': 'INVALID_CONTENT_TYPE'
            }), 400
        
        data = request.get_json()
        
        # 'urls' is the only required parameter
        if not data or 'urls' not in data:
            return jsonify({
                'success': False,
                'error': '缺少必需参数: urls',
                'code': 'MISSING_URLS'
            }), 400
        
        urls = data['urls']
        format_type = data.get('format', 'text').lower()
        
        # urls must be a non-empty list
        if not isinstance(urls, list) or len(urls) == 0:
            return jsonify({
                'success': False,
                'error': 'urls必须是非空数组',
                'code': 'INVALID_URLS'
            }), 400
        
        # Cap batch size to limit request duration and crawl pressure
        if len(urls) > 10:
            return jsonify({
                'success': False,
                'error': '批量处理最多支持10个链接',
                'code': 'TOO_MANY_URLS'
            }), 400
        
        # Only the three supported output formats are accepted
        if format_type not in ['text', 'html', 'markdown']:
            return jsonify({
                'success': False,
                'error': 'format参数必须为: text, html, markdown',
                'code': 'INVALID_FORMAT'
            }), 400
        
        logger.info(f"开始批量爬取 {len(urls)} 个文章")
        
        crawler_instance = get_crawler()
        results = []
        success_count = 0
        failed_count = 0
        
        # Process URLs sequentially; each failure is recorded and skipped
        for url in urls:
            try:
                url = url.strip()
                
                # Per-URL WeChat-link validation (crawler's private helper)
                if not crawler_instance._is_valid_wechat_url(url):
                    results.append({
                        'url': url,
                        'success': False,
                        'error': '不是有效的微信公众号文章链接'
                    })
                    failed_count += 1
                    continue
                
                # Fetch and parse the article
                article_info = crawler_instance.extract_article_content(url)
                
                if not article_info:
                    results.append({
                        'url': url,
                        'success': False,
                        'error': '无法提取文章内容'
                    })
                    failed_count += 1
                    continue
                
                # Pick the requested representation (format validated above)
                if format_type == 'text':
                    content = article_info['text_content']
                elif format_type == 'html':
                    content = article_info['html_content']
                elif format_type == 'markdown':
                    # Lazy third-party import, only needed for this branch
                    from markdownify import markdownify as md
                    content = md(article_info['html_content'])
                
                results.append({
                    'url': url,
                    'success': True,
                    'data': {
                        'title': article_info['title'],
                        'author': article_info['author'],
                        'publish_time': article_info['publish_time'],
                        'content': content,
                        'content_type': format_type,
                        'content_length': len(content)
                    }
                })
                success_count += 1
                
                # Throttle between requests to reduce the risk of being blocked
                time.sleep(1)
                
            except Exception as e:
                # Record the failure for this URL and continue with the rest
                logger.error(f"处理URL {url} 时发生错误: {str(e)}")
                results.append({
                    'url': url,
                    'success': False,
                    'error': str(e)
                })
                failed_count += 1
        
        logger.info(f"批量爬取完成: 成功 {success_count}, 失败 {failed_count}")
        
        return jsonify({
            'success': True,
            'data': {
                'total': len(urls),
                'success_count': success_count,
                'failed_count': failed_count,
                'results': results
            },
            'message': f'批量爬取完成: 成功 {success_count}, 失败 {failed_count}',
            'timestamp': time.strftime('%Y-%m-%d %H:%M:%S')
        })
    
    except Exception as e:
        # Catch-all boundary for failures outside the per-URL loop
        logger.error(f"批量爬取时发生错误: {str(e)}")
        logger.error(traceback.format_exc())
        
        return jsonify({
            'success': False,
            'error': f'服务器内部错误: {str(e)}',
            'code': 'INTERNAL_ERROR'
        }), 500

@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of Flask's default HTML page."""
    body = {
        'success': False,
        'error': '接口不存在',
        'code': 'NOT_FOUND'
    }
    return jsonify(body), 404

@app.route('/api/files/<filename>', methods=['GET'])
def serve_file(filename):
    """Serve a previously saved article file inline in the browser.

    Args:
        filename: name of a file inside the api_files directory (Flask's
            default <filename> converter already excludes '/').

    Returns:
        The file with a charset-qualified Content-Type, or a JSON error
        (403 on path escape, 404 when missing, 500 on I/O failure).
    """
    try:
        files_dir = os.path.abspath(ensure_files_directory())
        filepath = os.path.abspath(os.path.join(files_dir, filename))

        # Security check FIRST, before any filesystem probing, so requests
        # outside the sandbox cannot even learn whether a path exists.
        # Comparing against files_dir + os.sep (not bare startswith) stops a
        # sibling directory like "api_files_evil" from slipping through.
        if not filepath.startswith(files_dir + os.sep):
            return jsonify({
                'success': False,
                'error': '文件访问被拒绝',
                'code': 'ACCESS_DENIED'
            }), 403

        if not os.path.exists(filepath):
            return jsonify({
                'success': False,
                'error': '文件不存在',
                'code': 'FILE_NOT_FOUND'
            }), 404

        # Pick Content-Type from the extension; unknown types are served
        # as opaque binary.
        content_types = {
            '.txt': 'text/plain; charset=utf-8',
            '.md': 'text/markdown; charset=utf-8',
            '.html': 'text/html; charset=utf-8'
        }

        file_ext = os.path.splitext(filename)[1].lower()
        content_type = content_types.get(file_ext, 'application/octet-stream')

        return send_file(
            filepath,
            mimetype=content_type,
            as_attachment=False,  # display inline in the browser
            download_name=filename
        )

    except Exception as e:
        logger.error(f"文件服务错误: {str(e)}")
        return jsonify({
            'success': False,
            'error': f'文件服务错误: {str(e)}',
            'code': 'FILE_SERVICE_ERROR'
        }), 500

@app.route('/api/files/<filename>/download', methods=['GET'])
def download_file(filename):
    """Force download of a previously saved article file.

    Args:
        filename: name of a file inside the api_files directory.

    Returns:
        The file as an attachment, or a JSON error (403 on path escape,
        404 when missing, 500 on I/O failure).
    """
    try:
        files_dir = os.path.abspath(ensure_files_directory())
        filepath = os.path.abspath(os.path.join(files_dir, filename))

        # Security check FIRST, before any filesystem probing, so requests
        # outside the sandbox cannot learn whether a path exists. The
        # trailing os.sep stops sibling directories ("api_files_evil")
        # from passing a bare startswith() comparison.
        if not filepath.startswith(files_dir + os.sep):
            return jsonify({
                'success': False,
                'error': '文件访问被拒绝',
                'code': 'ACCESS_DENIED'
            }), 403

        if not os.path.exists(filepath):
            return jsonify({
                'success': False,
                'error': '文件不存在',
                'code': 'FILE_NOT_FOUND'
            }), 404

        return send_file(
            filepath,
            as_attachment=True,  # force a download rather than inline display
            download_name=filename
        )

    except Exception as e:
        logger.error(f"文件下载错误: {str(e)}")
        return jsonify({
            'success': False,
            'error': f'文件下载错误: {str(e)}',
            'code': 'FILE_DOWNLOAD_ERROR'
        }), 500

@app.errorhandler(405)
def method_not_allowed(error):
    """Return a JSON 405 body for requests with an unsupported HTTP method."""
    body = {
        'success': False,
        'error': '请求方法不允许',
        'code': 'METHOD_NOT_ALLOWED'
    }
    return jsonify(body), 405

@app.errorhandler(500)
def internal_error(error):
    """Return a generic JSON 500 body for unhandled server errors."""
    body = {
        'success': False,
        'error': '服务器内部错误',
        'code': 'INTERNAL_ERROR'
    }
    return jsonify(body), 500

def main():
    """Print a startup banner and run the Flask server."""
    port = 8080  # 8080 avoids clashing with macOS AirPlay
    divider = "=" * 50
    print("微信公众号文章爬虫 API 服务")
    print(divider)
    print(f"服务地址: http://localhost:{port}")
    print(f"API文档: http://localhost:{port}")
    print(f"健康检查: http://localhost:{port}/api/health")
    print(divider)

    # host 0.0.0.0 exposes the service externally; debug stays off for
    # production use and threaded=True lets requests overlap.
    app.run(
        host='0.0.0.0',
        port=port,
        debug=False,
        threaded=True
    )

if __name__ == '__main__':
    main()