import os
import random
import re
import time
from datetime import datetime

import requests
from bs4 import BeautifulSoup


class WxCrawler:
    """Crawler for WeChat Official Account articles.

    Fetches an article page over HTTP, parses out the title, author,
    publish time, body HTML and image links, then saves the article
    locally as a Markdown file (plus the raw HTML page for debugging).

    Running this file directly is expected to crawl a sample article
    URL, save its content, and print the article info.
    """

    def __init__(self):
        # Request headers that mimic a desktop browser so the request is
        # less likely to be rejected as an obvious bot.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Connection": "keep-alive",
        }
        # Bounds (seconds) for the random delay before each request, to
        # avoid being rate-limited or banned for hammering the server.
        self.min_delay = 2
        self.max_delay = 5
        # Directory where parsed articles (Markdown + HTML) are written.
        self.wx_article_save_dir = "wx_articles_parsed"

    def crawl_article(self, article_url):
        """Fetch, parse, and save one WeChat article.

        Args:
            article_url: Full URL of the WeChat article page.

        Returns:
            The parsed article dict (see ``_parse_article``) on success,
            or ``None`` if the request, parse, or save step failed.
        """
        try:
            # Random delay so repeated calls don't hit the server in a
            # tight, easily-fingerprinted burst.
            delay = random.uniform(self.min_delay, self.max_delay)
            print(f"等待 {delay:.2f} 秒后请求...")
            time.sleep(delay)

            # Fetch the article page; timeout prevents a stalled
            # connection from hanging the crawler forever.
            response = requests.get(article_url, headers=self.headers, timeout=30)
            response.raise_for_status()  # raise on HTTP error status
            html_content = response.text
            # Extract structured article data from the page HTML.
            article_data = self._parse_article(html_content)

            if article_data:
                # Persist the parsed article as Markdown.
                md_file_name = self._save_article(article_data)
                print(
                    f"文章《{article_data['title']}》已成功保存到本地, 文件名：{md_file_name}"
                )
                # Also save the raw page next to the Markdown file, with
                # the same base name but an .html suffix, for debugging.
                # (splitext keeps the name identical instead of producing
                # a double "….md.html" suffix.)
                html_file_name = f"{os.path.splitext(md_file_name)[0]}.html"
                with open(html_file_name, "w", encoding="utf-8") as f:
                    f.write(html_content)
                print(
                    f"文章《{article_data['title']}》已成功保存到本地, 文件名：{html_file_name}"
                )
                return article_data
            else:
                print("未能提取到文章内容")
                return None

        except requests.exceptions.RequestException as e:
            print(f"请求出错: {e}")
            return None
        except Exception as e:
            # Deliberate best-effort boundary: report and return None
            # rather than crash the caller on an unexpected page layout.
            print(f"抓取过程中发生错误: {e}")
            return None

    def _parse_article(self, html_content):
        """Parse a WeChat article page into a structured dict.

        Args:
            html_content: Raw HTML of the article page.

        Returns:
            A dict with keys ``title``, ``author``, ``publish_time``,
            ``content_html``, ``img_links``; or ``None`` if parsing
            raised. Missing fields fall back to placeholder values
            (and the current time for ``publish_time``).
        """
        soup = BeautifulSoup(html_content, "html.parser")

        try:
            # Title lives in the #activity-name element on WeChat pages.
            title_element = soup.select_one("#activity-name")
            title = title_element.text.strip() if title_element else "未找到标题"

            # Account (author) name is in #js_name.
            author_element = soup.select_one("#js_name")
            author = author_element.text.strip() if author_element else "未找到作者"

            # Publish time; fall back to "now" when the page omits it.
            time_element = soup.select_one("#publish_time")
            publish_time = (
                time_element.text.strip()
                if time_element
                else datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            )

            # Body HTML is the #js_content container, kept as raw HTML.
            content_element = soup.select_one("#js_content")
            content_html = (
                str(content_element) if content_element else "<p>未找到正文内容</p>"
            )

            # Collect image URLs; WeChat lazy-loads images, so the real
            # URL is usually in data-src rather than src.
            img_tags = content_element.find_all("img") if content_element else []
            img_links = []
            for img in img_tags:
                src = img.get("data-src") or img.get("src")
                if src:
                    img_links.append(src)

            article_data = {
                "title": title,
                "author": author,
                "publish_time": publish_time,
                "content_html": content_html,
                "img_links": img_links,
            }

            return article_data

        except Exception as e:
            print(f"解析文章内容时出错: {e}")
            return None

    def _save_article(self, article_data) -> str:
        """Write the article to a local Markdown file.

        Args:
            article_data: Dict produced by ``_parse_article``.

        Returns:
            Path of the Markdown file that was written.
        """
        # Ensure the save directory exists (exist_ok avoids the
        # check-then-create race of the previous implementation).
        # NOTE: the original referenced self.mdtool.article_save_dir,
        # an attribute that was never defined and raised AttributeError;
        # the directory configured in __init__ is wx_article_save_dir.
        os.makedirs(self.wx_article_save_dir, exist_ok=True)

        # Build a filesystem-safe file name from title + timestamp.
        safe_title = re.sub(r'[\\/:*?"<>|]', "_", article_data["title"])
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
        file_name = f"{self.wx_article_save_dir}/{safe_title}_{timestamp}.md"

        # Assemble the Markdown document.
        md_content = f"""# {article_data['title']}

**作者**: {article_data['author']}
**发布时间**: {article_data['publish_time']}

## 正文内容

{self._convert_html_to_markdown(article_data['content_html'])}

## 图片链接

"""
        # Append a numbered list of the extracted image links.
        for i, img_url in enumerate(article_data["img_links"], 1):
            md_content += f"{i}. [图片 {i}]({img_url})\n"

        with open(file_name, "w", encoding="utf-8") as f:
            f.write(md_content)
        return file_name

    def _convert_html_to_markdown(self, html_content):
        """Convert article HTML to Markdown (basic implementation).

        TODO: improve the conversion logic. This simplified version
        strips all HTML tags and keeps only the text content, joining
        text fragments with blank lines.
        """
        soup = BeautifulSoup(html_content, "html.parser")
        return soup.get_text("\n\n")
