#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
TryExponent.com 本地镜像服务器

这个脚本提供了一个本地HTTP服务器，用于浏览爬取的TryExponent.com网站内容。
它会修复HTML文件中的资源路径，使其指向本地文件，实现1:1还原原网站的访问体验。
"""

import os
import re
import json
import logging
import sqlite3
import argparse
from urllib.parse import urlparse, urljoin, unquote
from http.server import HTTPServer, SimpleHTTPRequestHandler
from pathlib import Path
from bs4 import BeautifulSoup

# Logging setup: mirror every message to server.log and to the console.
_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(
    format=_LOG_FORMAT,
    level=logging.INFO,
    handlers=[logging.FileHandler("server.log"), logging.StreamHandler()],
)

# Module-level logger shared by the path fixer and the HTTP handler.
logger = logging.getLogger("LocalServer")


class PathFixer:
    """Rewrites URLs inside crawled HTML files to point at local copies.

    The crawler records a ``url -> local_path`` mapping in an SQLite
    database (``index.db`` inside the output directory). This class uses
    that mapping to rewrite ``<a href>``, ``<img src>``,
    ``<link rel=stylesheet href>`` and ``<script src>`` references into
    relative local paths, producing a browsable offline mirror.
    """

    def __init__(self, output_dir, base_url="https://www.tryexponent.com"):
        """Initialize the path fixer.

        Args:
            output_dir: Crawler output directory (must contain ``index.db``).
            base_url: Base URL of the original site.

        Raises:
            sqlite3.Error: If the index database cannot be opened.
        """
        self.output_dir = os.path.abspath(output_dir)
        self.base_url = base_url
        # Build from the normalized directory so the DB path and all
        # relative-path computations share the same root.
        self.db_path = os.path.join(self.output_dir, "index.db")

        try:
            self.conn = sqlite3.connect(self.db_path)
            self.cursor = self.conn.cursor()
        except sqlite3.Error as e:
            logger.error(f"连接数据库失败: {e}")
            raise

        # url -> rooted web path ("/dir/file") cache for resolved resources.
        self.resource_map = {}
        self._load_resource_map()

        logger.info("路径修复工具初始化完成")

    def _load_resource_map(self):
        """Preload the url -> local path cache from the resources table."""
        try:
            self.cursor.execute("SELECT url, local_path FROM resources")
            for url, local_path in self.cursor.fetchall():
                if local_path:
                    self.resource_map[url] = self._to_web_path(local_path)
        except sqlite3.Error as e:
            logger.error(f"加载资源映射失败: {e}")

    def _to_web_path(self, local_path):
        """Convert a stored filesystem path to a rooted, slash-separated web
        path ("/dir/file") relative to the output directory."""
        rel_path = os.path.relpath(local_path, self.output_dir)
        return f"/{rel_path.replace(os.sep, '/')}"

    def _relative_to(self, web_path, file_path):
        """Convert a rooted web path ("/a/b") into a path relative to the
        directory containing *file_path*, using forward slashes."""
        rel_path = os.path.relpath(
            os.path.join(self.output_dir, web_path.lstrip('/')),
            os.path.dirname(file_path)
        )
        return rel_path.replace(os.sep, '/')

    def fix_all_files(self):
        """Fix resource paths in every HTML file under ``articles/``."""
        articles_dir = os.path.join(self.output_dir, "articles")

        for root, _, files in os.walk(articles_dir):
            for file in files:
                if file.endswith(".html"):
                    self.fix_file(os.path.join(root, file))

        logger.info("所有文件路径修复完成")

    def fix_file(self, file_path):
        """Fix the resource paths inside a single HTML file (in place).

        Args:
            file_path: Path to the HTML file to rewrite.
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                soup = BeautifulSoup(f.read(), 'lxml')

            # Rewrite every reference type that can point back at the live site.
            self._fix_links(soup, file_path)
            self._fix_resource_attr(soup, file_path, 'img', 'src')
            self._fix_resource_attr(soup, file_path, 'link', 'href', rel='stylesheet')
            self._fix_resource_attr(soup, file_path, 'script', 'src')

            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(str(soup))

            logger.info(f"文件路径修复完成: {file_path}")
        except Exception as e:
            logger.error(f"修复文件失败: {file_path}, 错误: {e}")

    def _fix_links(self, soup, file_path):
        """Rewrite same-site ``<a href>`` values to relative local paths.

        Args:
            soup: BeautifulSoup document.
            file_path: Path of the HTML file being fixed.
        """
        for a_tag in soup.find_all('a', href=True):
            href = a_tag['href']

            # Leave fragments and non-navigational schemes untouched.
            if href.startswith(('#', 'mailto:', 'tel:')):
                continue

            full_url = urljoin(self.base_url, href)

            # Only rewrite URLs on the mirrored site's own domain.
            if urlparse(full_url).netloc != urlparse(self.base_url).netloc:
                continue

            local_path = self._get_local_path(full_url)
            if local_path:
                a_tag['href'] = self._relative_to(local_path, file_path)

    def _fix_resource_attr(self, soup, file_path, tag_name, attr, **find_kwargs):
        """Rewrite one URL-bearing attribute on all matching tags.

        Shared implementation for images (``img[src]``), stylesheets
        (``link[rel=stylesheet][href]``) and scripts (``script[src]``).

        Args:
            soup: BeautifulSoup document.
            file_path: Path of the HTML file being fixed.
            tag_name: Tag to search for.
            attr: Attribute holding the URL ('src' or 'href').
            **find_kwargs: Extra find_all filters (e.g. rel='stylesheet').
        """
        for tag in soup.find_all(tag_name, **{attr: True}, **find_kwargs):
            full_url = urljoin(self.base_url, tag[attr])
            local_path = self._get_local_path(full_url)
            if local_path:
                tag[attr] = self._relative_to(local_path, file_path)

    def _get_local_path(self, url):
        """Return the rooted local web path for *url*, or None if unknown.

        Resolution order: in-memory cache, the resources table, then a
        filesystem guess derived from the URL path. Successful lookups are
        cached.

        Args:
            url: Absolute URL.

        Returns:
            str: Rooted web path ("/dir/file"), or None when unresolved.
        """
        if url in self.resource_map:
            return self.resource_map[url]

        # 1) Ask the crawler's index database.
        try:
            self.cursor.execute("SELECT local_path FROM resources WHERE url = ?", (url,))
            row = self.cursor.fetchone()
            if row and row[0]:
                local_path = self._to_web_path(row[0])
                self.resource_map[url] = local_path
                return local_path
        except sqlite3.Error as e:
            logger.error(f"查询资源路径失败: {url}, 错误: {e}")

        # 2) Fall back to inferring a location from the URL path.
        parsed_url = urlparse(url)
        if parsed_url.netloc == urlparse(self.base_url).netloc:
            path = parsed_url.path.strip('/')
            if path:
                # Article pages are stored as articles/<path>.html.
                article_path = os.path.join(self.output_dir, "articles", path)
                if os.path.exists(f"{article_path}.html"):
                    local_path = f"/articles/{path}.html"
                    self.resource_map[url] = local_path
                    return local_path

                # Downloaded assets live under resources/<type>/<filename>.
                filename = os.path.basename(path)
                for resource_type in ('pdf', 'images'):
                    resource_path = os.path.join(self.output_dir, "resources", resource_type, filename)
                    if os.path.exists(resource_path):
                        local_path = f"/resources/{resource_type}/{filename}"
                        self.resource_map[url] = local_path
                        return local_path

        return None

    def close(self):
        """Close the database connection if it was opened."""
        if hasattr(self, 'conn') and self.conn:
            self.conn.close()


class ExponentHTTPRequestHandler(SimpleHTTPRequestHandler):
    """HTTP request handler that maps mirror-site URLs to local files.

    ``output_dir`` is a class attribute; ``run_server`` creates a subclass
    whose ``output_dir`` points at the crawler output directory.
    """

    # Default served directory; overridden per-server via a dynamically
    # created subclass (see run_server).
    output_dir = './output'

    def translate_path(self, path):
        """Map a request URL path to a local filesystem path.

        Tries, in order: the literal path under output_dir, a directory's
        index.html, the path with a ``.html`` suffix appended, and finally
        tryexponent.com-style article URLs under ``articles/``.

        Args:
            path: Raw request path (may contain %-escapes, a query string
                and a fragment).

        Returns:
            str: Local filesystem path. It may not exist, in which case the
            base handler responds with 404.
        """
        base_dir = os.path.abspath(self.output_dir)

        # Decode %-escapes and strip query string / fragment.
        path = unquote(path)
        path = path.split('?', 1)[0]
        path = path.split('#', 1)[0]
        path = path.strip('/')

        # Root request -> site homepage.
        if not path:
            return os.path.join(base_dir, 'index.html')

        # Normalize and refuse anything that escapes the served directory
        # (e.g. "/../../etc/passwd"); fall back to the homepage instead of
        # exposing files outside output_dir.
        local_path = os.path.normpath(os.path.join(base_dir, path))
        if os.path.commonpath([base_dir, local_path]) != base_dir:
            return os.path.join(base_dir, 'index.html')

        # Directory request -> serve its index.html when present.
        if os.path.isdir(local_path):
            index_path = os.path.join(local_path, 'index.html')
            if os.path.exists(index_path):
                return index_path

        # "Pretty" URL without an extension -> the stored .html file.
        if not os.path.exists(local_path) and not path.endswith('.html'):
            html_path = f"{local_path}.html"
            if os.path.exists(html_path):
                return html_path

        # tryexponent.com-style URLs: /<category>[/<article>].
        if not os.path.exists(local_path):
            parts = path.split('/')
            category = parts[0]
            article_dir = os.path.join(base_dir, 'articles', category)

            if len(parts) >= 2:
                # Specific article inside the category.
                article_file = os.path.join(article_dir, f"{parts[1]}.html")
                if os.path.exists(article_file):
                    return article_file

            # Category landing page.
            category_index = os.path.join(article_dir, f"{category}.html")
            if os.path.exists(category_index):
                return category_index

        return local_path

    def log_message(self, format, *args):
        """Route request logging through the module logger instead of stderr."""
        logger.info(f"{self.address_string()} - {format % args}")


def run_server(output_dir, port=8000):
    """Serve the mirrored site over HTTP until interrupted.

    Args:
        output_dir: Crawler output directory to serve.
        port: TCP port to listen on.
    """
    # Give this server its own handler subclass so setting output_dir
    # does not mutate the shared base-class attribute.
    handler_attrs = {'output_dir': os.path.abspath(output_dir)}
    handler_class = type('CustomHandler', (ExponentHTTPRequestHandler,), handler_attrs)

    httpd = HTTPServer(('', port), handler_class)

    logger.info(f"启动本地服务器，地址: http://localhost:{port}/")
    logger.info(f"服务目录: {output_dir}")
    logger.info("按Ctrl+C停止服务器")

    try:
        # Blocks until interrupted (Ctrl+C) or the process is killed.
        httpd.serve_forever()
    except KeyboardInterrupt:
        logger.info("服务器已停止")
    finally:
        httpd.server_close()


def main():
    """Parse CLI arguments, optionally fix HTML paths, then start the server."""
    parser = argparse.ArgumentParser(description='TryExponent.com 本地镜像服务器')
    parser.add_argument('-d', '--dir', default='./output', help='爬虫输出目录')
    parser.add_argument('-p', '--port', type=int, default=8000, help='服务器端口')
    parser.add_argument('--fix-paths', action='store_true', help='修复HTML文件中的路径')

    args = parser.parse_args()

    # Refuse to start when there is nothing to serve.
    if not os.path.exists(args.dir):
        logger.error(f"输出目录不存在: {args.dir}")
        return

    # Optional pre-pass: rewrite resource URLs inside the crawled HTML.
    if args.fix_paths:
        fixer = None
        try:
            fixer = PathFixer(args.dir)
            fixer.fix_all_files()
        except Exception as e:
            logger.error(f"路径修复失败: {e}")
            return
        finally:
            # Close the DB connection even when fixing fails (the original
            # code leaked it on error).
            if fixer is not None:
                fixer.close()

    run_server(args.dir, args.port)


# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
