# Standalone crawler service — complete example

import logging
import os
import platform
import time
from logging.handlers import RotatingFileHandler
import requests
import re
from flask import Flask, request, jsonify, g
from flask_cors import CORS
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from flask_caching import Cache
from playwright.sync_api import sync_playwright
from bs4 import BeautifulSoup
import werkzeug.exceptions as http_exceptions

app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False  # legacy Flask setting: do not escape non-ASCII characters in JSON
app.config['JSONIFY_MIMETYPE'] = 'application/json; charset=utf-8'  # global JSON mimetype
app.json.ensure_ascii = False  # Flask >= 2.2 equivalent: emit UTF-8 characters as-is
CORS(app)  # allow cross-origin requests from any origin

cache = Cache(config={'CACHE_TYPE': 'SimpleCache'})  # in-process cache (per worker, not shared)
cache.init_app(app)

limiter = Limiter(app=app, key_func=get_remote_address, default_limits=["60 per minute"])  # rate limit: 60 requests/minute per client IP

# Logger usage examples:
# app.logger.debug('This is a debug message')
# app.logger.info('This is an info message')
# app.logger.warning('This is a warning message')
# app.logger.error('This is an error message')

# Make sure the log directory exists before attaching the file handler
os.makedirs('logs', exist_ok=True)
log_file_path = 'logs/app.log'  # rotating log file path
# NOTE(review): maxBytes=10240 rotates at ~10 KB per file — confirm this tiny size is intentional
handler = RotatingFileHandler(log_file_path, maxBytes=10240, backupCount=5)
handler.setLevel(logging.DEBUG)  # handler accepts everything from DEBUG up
formatter = logging.Formatter('[%(asctime)s] - %(name)s - %(levelname)s in %(module)s: %(message)s')
handler.setFormatter(formatter)
app.logger.addHandler(handler)

# The logger itself must also be at DEBUG, or records never reach the handler
app.logger.setLevel(logging.DEBUG)


# 响应工具类
class ResponseUtil:
    @staticmethod
    def success(data=None):
        response = jsonify({
            "code": 200,
            "msg": "success",
            "data": data or {}
        })
        response.headers['Content-Type'] = 'application/json; charset=utf-8'  # 强制指定编码
        return response

    @staticmethod
    def error(code, msg):
        response = jsonify({
            "code": code,
            "msg": msg,
            "data": {}
        })
        response.headers['Content-Type'] = 'application/json; charset=utf-8'  # 强制指定编码
        return response


# API异常处理
class APIException(Exception):
    def __init__(self, code=500, msg="Internal Server Error"):
        self.code = code
        self.msg = msg
        super().__init__()


# 爬虫构造类
class PlaywrightCrawler:
    def __init__(self):
        self.min_content_length = 200

    # 异步方法
    def scrape(self, url, wait_time):
        try:
            # 先尝试静态抓取
            static_text = self.scrape_static_content(url)
            if static_text and len(static_text) > self.min_content_length:
                return static_text

            # 静态失败时启用动态渲染
            dynamic_text = self.scrape_dynamic_content(url, wait_time)
            if dynamic_text:
                return dynamic_text
        except APIException as e:
            raise e
        except Exception as e:
            raise APIException(msg=str(e))

    # 文本清洗优化
    def _clean_text(self, text):
        text = re.sub(r'\s+', ' ', text)
        text = re.sub(r'[\x00-\x1F\x7F]', '', text)
        return text.strip()

    # 爬取静态内容
    def scrape_static_content(self, url):
        try:
            response = requests.get(url, timeout=10)
            response.encoding = response.apparent_encoding  # 自动检测编码
            soup = BeautifulSoup(response.text, 'html.parser')

            # 移除不需要的元素
            for element in soup(['script', 'style', 'noscript', 'svg', 'nav', 'footer', 'head', 'meta']):
                element.decompose()

            # 获取纯文本
            text = soup.get_text(separator='\n', strip=True)
            text = self._clean_text(text)

            return text
        except APIException as e:
            raise e
        except Exception as e:
            raise APIException(msg=str(e))

    # 抓取动态内容
    def scrape_dynamic_content(self, url, wait_time):
        try:
            with sync_playwright() as p:
                # browser = p.chromium.launch() # ubuntu系统 可以不用配置参数

                # 判断是否为 Windows 系统
                if platform.system() == 'Windows':
                    executable_path = "C:/Users/Administrator/AppData/Local/Google/Chrome/Bin/chrome.exe"
                else:
                    executable_path = None

                browser = p.chromium.launch(
                    headless=True,
                    executable_path=executable_path,
                    args=[
                        "--no-sandbox",
                        "--disable-dev-shm-usage",
                        "--ignore-certificate-errors",
                        "--ignore-urlfetcher-cert-requests"
                    ]
                )  # 无头模式

                browser = browser.new_context(ignore_https_errors=True)

                page = browser.new_page()

                # 设置超时和加载策略
                page.set_default_timeout(60000)  # 60秒超时
                page.goto(url, wait_until="domcontentloaded")

                wait_time = int(wait_time) if wait_time is not None else 1  # 设置默认值为 0

                # 等待动态内容加载（根据实际需求调整）
                if wait_time > 0:
                    time.sleep(wait_time)  # 简单等待，可替换为 page.wait_for_selector()
                else:
                    page.wait_for_selector('div')

                # 获取完整渲染后的 HTML
                html_content = page.content()
                soup = BeautifulSoup(html_content, 'html.parser')

                # # 移除不需要的元素
                for element in soup(['script', 'style', 'noscript', 'svg', 'nav', 'footer', 'head', 'meta']):
                    element.decompose()

                # 示例：提取标题（按需自定义解析逻辑）
                title = page.title()

                # 智能定位正文
                html_content = soup.find(['article', 'main', 'div.content']) or soup.body
                html_content = html_content.get_text(separator=' ', strip=True)

                browser.close()

                return html_content
        except APIException as e:
            raise e
        except Exception as e:
            raise APIException(msg=str(e))


# ----------------------------
# Access logging after each request
# ----------------------------
@app.after_request
def log_access(response):
    """Log one access line per request: method, path, status, client IP, latency.

    ``g.response_time`` is populated by ``calculate_response_time``; Flask runs
    after_request hooks in reverse registration order, so that hook fires
    before this one. Fall back to 0 if it is missing.
    """
    app.logger.info(
        '%s %s -> %s ip=%s time=%sms',
        request.method,
        request.path,
        response.status_code,
        request.remote_addr,
        round(g.get('response_time', 0) * 1000, 2),  # milliseconds
    )
    return response


# ----------------------------
# Record request start time
# ----------------------------
@app.before_request
def record_start_time():
    # Stamp wall-clock start so after_request hooks can compute latency.
    g.start_time = time.time()


# ----------------------------
# Compute response time
# ----------------------------
@app.after_request
def calculate_response_time(response):
    """Store the request's elapsed wall-clock seconds in ``g.response_time``.

    Uses ``g.get`` so a request that skipped ``record_start_time`` (e.g. an
    earlier before_request hook raised) yields 0 instead of AttributeError.
    """
    g.response_time = time.time() - g.get('start_time', time.time())
    return response


# ----------------------------
# Error handling
# ----------------------------
@app.errorhandler(Exception)
def handle_exception(e):
    """Convert any exception into the standard JSON envelope.

    All errors are reported through the body's ``code`` field while the HTTP
    status stays 200 — the API's envelope convention.
    """
    # Domain exceptions carry their own code/message.
    if isinstance(e, APIException):
        return ResponseUtil.error(e.code, e.msg), 200

    # Werkzeug HTTP exceptions (404, 405, ...).
    if isinstance(e, http_exceptions.HTTPException):
        return ResponseUtil.error(e.code, e.description), 200

    # Unknown failure: record the stack trace before masking it from clients.
    app.logger.exception('Unhandled exception: %s', e)
    return ResponseUtil.error(500, str(e)), 200


# ----------------------------
# Site index
# ----------------------------
@app.route("/")
@limiter.limit("60 per minute")
@cache.cached(timeout=300, query_string=True)
def hello():
    # Simple landing/liveness endpoint; response cached for 5 minutes.
    return "星数引擎驱动"


# ----------------------------
# Fetch page text
# ----------------------------
@app.route('/crawler/fetch/text', methods=['GET', 'POST'])
@limiter.limit("60 per minute")
@cache.cached(timeout=300, query_string=True)
def scrape_handler():
    """Scrape the visible text of ``url`` and return it in the JSON envelope.

    Parameters (query string or form body):
        url:  target page URL (required)
        time: optional seconds to wait for JS rendering on the dynamic path
    """
    # request.values merges query-string and form parameters, so POST clients
    # may send them either way (backward compatible with the args-only code).
    target_url = request.values.get('url')
    wait_time = request.values.get('time')

    if not target_url:
        # Use the shared envelope (code/msg/data keys) instead of the previous
        # ad-hoc {'code', 'message'} payload so all error responses look alike.
        return ResponseUtil.error(400, 'URL is required'), 400

    crawler = PlaywrightCrawler()
    html_content = crawler.scrape(target_url, wait_time)
    return ResponseUtil.success({"content": html_content})


# ----------------------------
# Serve the application
# ----------------------------
if __name__ == '__main__':
    # Listen on all interfaces; debug disabled for production-like behavior.
    app.run(host='0.0.0.0', port=5000, debug=False)
