#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import os
import re
import sys
from typing import Tuple, Optional, Dict, Any
import requests
from bs4 import BeautifulSoup, Tag

# Try to import markdownify; if it is unavailable, fall back to plain-text
# extraction so the script still works (without Markdown formatting).
try:
    from markdownify import markdownify as md
except ImportError:
    # Fallback: strip all HTML tags with BeautifulSoup and return bare text.
    # Headings, lists and links are lost in this degraded mode.
    def md(html, **kwargs):
        """Degraded HTML->text conversion used when markdownify is missing."""
        soup = BeautifulSoup(html, "html.parser")
        return soup.get_text()

from urllib.parse import urljoin


# Default HTTP request headers: a desktop Chrome user agent plus a cnblogs
# Referer so the site serves the regular article page instead of blocking us.
DEFAULT_HEADERS = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/122.0.0.0 Safari/537.36"
    ),
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Referer": "https://www.cnblogs.com/",
    "Connection": "keep-alive",
}


def log_info(message: str) -> None:
    """Write an informational log line to stdout."""
    print("[INFO]", message)


def log_warn(message: str) -> None:
    """Write a warning log line to stdout."""
    print("[WARN]", message)


def log_error(message: str) -> None:
    """Write an error log line to stderr."""
    print("[ERROR]", message, file=sys.stderr)


class HttpClient:
    """Thin wrapper around requests.Session with default headers and a timeout.

    Attributes:
        session: The underlying requests.Session (headers pre-populated).
        timeout: Per-request timeout in seconds.
    """

    def __init__(self, headers: Optional[Dict[str, str]] = None, timeout: int = 20):
        """Create a session.

        Args:
            headers: Headers to use instead of DEFAULT_HEADERS (optional).
            timeout: Request timeout in seconds (default 20).
        """
        self.session = requests.Session()
        self.session.headers.update(headers or DEFAULT_HEADERS)
        self.timeout = timeout

    def get(self, url: str) -> str:
        """Fetch *url* and return the decoded response body.

        Raises:
            RuntimeError: if the request fails or returns a non-2xx status.
        """
        try:
            resp = self.session.get(url, timeout=self.timeout)
            resp.raise_for_status()  # non-2xx status -> requests.HTTPError
        except requests.RequestException as exc:
            # Raise a concrete exception type instead of bare Exception, and
            # chain the original error so the traceback is preserved.
            raise RuntimeError(f"请求失败: {exc}") from exc

        # apparent_encoding guesses the charset from the body bytes; fall back
        # to UTF-8 if detection yields nothing.
        resp.encoding = resp.apparent_encoding or "utf-8"
        return resp.text


def sanitize_filename(name: str) -> str:
    """Make *name* safe to use as a file name.

    Illegal characters are replaced with underscores, surrounding whitespace
    is trimmed, internal whitespace runs collapse to one space, and the
    result is capped at 180 characters.
    """
    cleaned = re.sub(r'[\\/:*?"<>|]', "_", name).strip()
    cleaned = re.sub(r"\s+", " ", cleaned)
    # Slicing is a no-op for short strings, so no length check is needed.
    return cleaned[:180]


def ensure_dir(path: str) -> None:
    """Create directory *path* (with parents); do nothing if it exists."""
    if not os.path.isdir(path):
        os.makedirs(path, exist_ok=True)


def extract_title_and_body(soup: BeautifulSoup) -> Tuple[str, str]:
    """Extract the article title and body HTML from a cnblogs post page.

    Args:
        soup: Parsed document of the article page.

    Returns:
        A ``(title, body_html)`` tuple where ``body_html`` is the serialized
        post body with scripts/ads/navigation removed.

    Raises:
        RuntimeError: if no post body container can be located, or the match
            is not an element.
    """
    # --- Title ---------------------------------------------------------
    # NOTE: find(id=...) matches any tag with that id, so it already covers
    # the <h1 id=...> and <a id=...> variants; they are kept as fallbacks in
    # case of duplicate ids in malformed pages.
    title_candidates = [
        soup.find(id="cb_post_title_url"),
        soup.find("h1", id="cb_post_title_url"),
        soup.find("h1", class_=re.compile("post|title", re.I)),
        soup.find("a", id="cb_post_title_url"),
    ]
    title = None
    for candidate in title_candidates:
        if candidate and candidate.get_text(strip=True):
            title = candidate.get_text(strip=True)
            break
    if not title:
        # Last resort: the page <title> element.
        title_tag = soup.find("title")
        title = title_tag.get_text(strip=True) if title_tag else "untitled"

    # --- Body ----------------------------------------------------------
    body = soup.find(id="cnblogs_post_body")
    if not body:
        body = soup.find(class_=re.compile("post|content|body", re.I))
    if not body:
        raise RuntimeError("未找到正文容器，页面结构可能已变化。")
    # find() can return a NavigableString for text-only matches; reject it.
    if not isinstance(body, Tag):
        raise RuntimeError("正文容器不是有效的Tag对象。")

    # Strip scripts, ads, signatures and other non-content widgets.
    # (The redundant isinstance re-check that used to guard this section was
    # always true after the raise above and has been removed.)
    for selector in [
        "script", "style", "noscript",
        "div#MySignature", "div#MyTopNavigator", "div#MyBottomNavigator",
        "div.recommend_btns", "div#div_digg", "div#opt_under_post",
        "div#cnblogs_c1", "div#cnblogs_c2", "div#blog_post_info_block",
        "div#ad_t2", "div#ad_c1", "div#ad_c2",
        "iframe", "ins", "aside", "footer",
    ]:
        for node in body.select(selector):
            node.decompose()

    # Drop in-page anchor links (href="#...") — meaningless in an offline file.
    for anchor in body.find_all("a", href=True):
        href = anchor.get("href", "")
        if isinstance(href, str) and href.startswith("#"):
            anchor.attrs.pop("href", None)

    return title, str(body)


def fix_image_sources(body_html: str, base_url: str) -> str:
    """Rewrite <img> tags so every image has an absolute ``src`` URL.

    Handles lazy-load attributes (data-src / data-original / srcset),
    protocol-relative URLs, and paths relative to *base_url*, then removes
    the lazy-loading leftovers.
    """
    soup = BeautifulSoup(body_html, "lxml")
    for img in soup.find_all("img"):
        candidates = [
            img.get("data-src"),
            img.get("data-original"),
            img.get("src"),
        ]
        srcset = img.get("srcset")
        if srcset and not any(candidates):
            if isinstance(srcset, str):
                # Take the URL part of the first srcset entry.
                candidates = [srcset.split(",")[0].strip().split(" ")[0]]
        source = next((c for c in candidates if c), None)
        if not source:
            continue
        if isinstance(source, str):
            source = source.strip()
            if source.startswith("//"):
                # Protocol-relative URL: assume HTTPS.
                source = "https:" + source
            elif not source.startswith("http"):
                source = urljoin(base_url, source)
            img["src"] = source
        # Clean up lazy-loading attributes now that src is fixed.
        for attr in ["srcset", "data-src", "data-original", "data-lazy-src", "loading"]:
            if attr in img.attrs:
                img.attrs.pop(attr, None)
    return str(soup)


def html_to_markdown(html: str) -> str:
    """Convert HTML to Markdown, degrading gracefully when converters fail.

    Tries, in order: markdownify with explicit options, markdownify with
    defaults, plain-text extraction, and finally the raw HTML unchanged.
    """
    # Strategy 1: markdownify with ATX headings and '-'/'*'/'+' bullets.
    try:
        return md(html, heading_style="ATX", bullets="-*+")
    except Exception:
        pass
    # Strategy 2: markdownify with default settings.
    try:
        return md(html)
    except Exception:
        pass
    # Strategy 3: strip tags and keep text only.
    try:
        return BeautifulSoup(html, "lxml").get_text("\n")
    except Exception:
        # Last resort: return the input untouched.
        return html


def fetch_article_content(client: HttpClient, article_url: str) -> Tuple[str, str]:
    """Download one article and return its (title, markdown_text)."""
    page_html = client.get(article_url)
    document = BeautifulSoup(page_html, "lxml")
    title, body_html = extract_title_and_body(document)
    normalized_body = fix_image_sources(body_html, article_url)
    return title, html_to_markdown(normalized_body)


def save_article_to_markdown(title: str, markdown_content: str, output_dir: str = ".") -> str:
    """Write *markdown_content* to ``<output_dir>/<sanitized title>.md``.

    Args:
        title: Article title; sanitized to build the file name.
        markdown_content: Markdown text to write.
        output_dir: Target directory, created if missing.

    Returns:
        The path actually written. If the name already exists, a " (1)",
        " (2)", ... suffix is appended so nothing is overwritten.
    """
    ensure_dir(output_dir)
    filename = sanitize_filename(title) or "untitled"
    # BUG FIX: the file name was hard-coded as "(unknown).md" and the computed
    # `filename` was never used; build the path from the sanitized title.
    path = os.path.join(output_dir, f"{filename}.md")

    # De-duplicate against existing files without clobbering them.
    base, ext = os.path.splitext(path)
    index = 1
    while os.path.exists(path):
        path = f"{base} ({index}){ext}"
        index += 1

    # newline="\n" keeps line endings consistent across platforms.
    with open(path, "w", encoding="utf-8", newline="\n") as f:
        f.write(markdown_content)

    return path


def download_single_article(url: str, output_dir: str = ".") -> None:
    """Fetch one article by URL and persist it as a Markdown file.

    Logs progress; on failure the error is logged and re-raised.
    """
    log_info(f"开始下载文章: {url}")

    http = HttpClient()
    try:
        title, markdown_text = fetch_article_content(http, url)
        destination = save_article_to_markdown(title, markdown_text, output_dir)
        log_info(f"文章《{title}》已保存至: {destination}")
    except Exception as exc:
        log_error(f"下载文章失败: {exc}")
        raise


def parse_args():
    """Build the command-line parser and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser(description="下载博客园单篇文章")
    arg_parser.add_argument(
        "url",
        help="文章URL，格式如 https://www.cnblogs.com/yxh168/p/12408800.html",
    )
    arg_parser.add_argument(
        "-o",
        "--output-dir",
        default=".",
        help="输出目录，默认为当前目录",
    )
    return arg_parser.parse_args()


def main():
    """CLI entry point: validate the URL, then download the article.

    Exits with status 1 on a malformed URL, user interrupt, or any
    download failure.
    """
    args = parse_args()

    # Accept only canonical cnblogs post URLs.
    cnblogs_post = r"https://www\.cnblogs\.com/.+/p/\d+\.html"
    if not re.match(cnblogs_post, args.url):
        log_error("URL格式不正确，请使用类似 https://www.cnblogs.com/yxh168/p/12408800.html 的格式")
        sys.exit(1)

    try:
        download_single_article(args.url, args.output_dir)
        log_info("下载完成")
    except KeyboardInterrupt:
        log_warn("用户中断")
        sys.exit(1)
    except Exception as exc:
        log_error(f"下载失败: {exc}")
        sys.exit(1)


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()