#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
微信公众号文章爬虫
支持将文章内容转换为Markdown或HTML格式
"""

import requests
import re
import os
import time
import json
from urllib.parse import urlparse, parse_qs
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
import html2text
from markdownify import markdownify as md
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager


class WeChatArticleCrawler:
    """Crawler for WeChat Official Account (mp.weixin.qq.com) articles.

    Downloads an article page via requests (or Selenium for
    JavaScript-rendered pages), extracts the title/author/date/body,
    and saves the result as Markdown and/or a standalone HTML document.
    """

    # Characters that are not allowed in filenames on common platforms.
    _INVALID_FILENAME_CHARS = r'[<>:"/\\|?*]'
    # Maximum number of characters kept in a generated filename.
    _MAX_FILENAME_LEN = 100

    def __init__(self, use_selenium=False):
        """
        Initialize the crawler.
        :param use_selenium: whether to use Selenium (needed for pages
            that require JavaScript rendering)
        """
        self.session = requests.Session()
        self.ua = UserAgent()
        self.use_selenium = use_selenium
        self.driver = None

        # Browser-like headers to reduce the chance of being blocked.
        self.headers = {
            'User-Agent': self.ua.random,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        }
        self.session.headers.update(self.headers)

        if self.use_selenium:
            self._init_selenium()

    def _init_selenium(self):
        """Initialize a headless Chrome WebDriver.

        On failure, falls back to the requests-based path by clearing
        ``self.use_selenium``.
        """
        # Imported locally so the requests-only path works even when the
        # Selenium/Chrome stack is broken; also avoids relying on
        # webdriver.chrome.service being reachable via attribute access.
        from selenium.webdriver.chrome.service import Service

        chrome_options = Options()
        chrome_options.add_argument('--headless')  # headless mode
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument(f'--user-agent={self.ua.random}')

        try:
            self.driver = webdriver.Chrome(
                service=Service(ChromeDriverManager().install()),
                options=chrome_options
            )
        except Exception as e:
            print(f"Selenium初始化失败: {e}")
            print("将使用requests方式爬取")
            self.use_selenium = False

    def extract_article_content(self, url):
        """
        Extract the article content.
        :param url: WeChat article URL
        :return: dict with article info, or None on failure
        """
        try:
            if self.use_selenium and self.driver:
                return self._extract_with_selenium(url)
            else:
                return self._extract_with_requests(url)
        except Exception as e:
            print(f"提取文章内容失败: {e}")
            return None

    def _extract_with_requests(self, url):
        """Fetch and parse the article with requests (no JS rendering)."""
        # Small delay between requests to avoid being blocked.
        time.sleep(1)

        response = self.session.get(url, timeout=30)
        response.raise_for_status()

        # Improve encoding detection: requests defaults to ISO-8859-1
        # when the server sends no charset, which mangles Chinese text.
        if response.encoding is None or response.encoding.lower() in ['iso-8859-1', 'ascii']:
            # First try the charset from the Content-Type header.
            content_type = response.headers.get('content-type', '').lower()
            if 'charset=' in content_type:
                charset = content_type.split('charset=')[1].split(';')[0].strip()
                response.encoding = charset
            else:
                # Fall back to chardet when it is installed; it is a
                # third-party package, so treat it as optional.
                try:
                    import chardet
                    detected = chardet.detect(response.content)
                except ImportError:
                    detected = {'encoding': None, 'confidence': 0.0}
                if detected['encoding'] and detected['confidence'] > 0.7:
                    response.encoding = detected['encoding']
                else:
                    # Default to UTF-8 (WeChat pages are UTF-8).
                    response.encoding = 'utf-8'

        # If we still have no usable encoding, try the common Chinese
        # encodings in order, finally decoding leniently as UTF-8.
        if not response.encoding or response.encoding.lower() == 'iso-8859-1':
            for encoding in ['utf-8', 'gbk', 'gb2312', 'gb18030']:
                try:
                    html_content = response.content.decode(encoding)
                    break
                except (UnicodeDecodeError, LookupError):
                    continue
            else:
                # Every candidate failed: decode with errors ignored.
                html_content = response.content.decode('utf-8', errors='ignore')
        else:
            html_content = response.text

        return self._parse_article_content(html_content, url)

    def _extract_with_selenium(self, url):
        """Fetch and parse the article with Selenium (JS rendered)."""
        self.driver.get(url)

        # Wait until the article body container has been rendered.
        WebDriverWait(self.driver, 10).until(
            EC.presence_of_element_located((By.ID, "js_content"))
        )

        html_content = self.driver.page_source
        return self._parse_article_content(html_content, url)

    def _parse_article_content(self, html_content, url):
        """Parse the article page HTML into a structured dict.

        :param html_content: full page HTML
        :param url: original article URL (stored in the result)
        :return: dict with title, author, publish_time, url,
            text_content, html_content and the BeautifulSoup node
        :raises Exception: when no article body container is found
        """
        soup = BeautifulSoup(html_content, 'lxml')

        # Title: new layout uses #activity-name, old one .rich_media_title.
        title_elem = soup.find('h1', {'id': 'activity-name'}) or soup.find('h2', {'class': 'rich_media_title'})
        title = title_elem.get_text().strip() if title_elem else "未知标题"

        # Author / account name.
        author_elem = soup.find('a', {'class': 'rich_media_meta_link'}) or soup.find('span', {'class': 'rich_media_meta_text'})
        author = author_elem.get_text().strip() if author_elem else "未知作者"

        # Publish time: extract a YYYY-MM-DD date if present.
        time_elem = soup.find('em', {'id': 'publish_time'}) or soup.find('span', {'class': 'rich_media_meta_text'})
        publish_time = ""
        if time_elem:
            time_text = time_elem.get_text().strip()
            time_match = re.search(r'\d{4}-\d{2}-\d{2}', time_text)
            if time_match:
                publish_time = time_match.group()

        # Article body: #js_content (current) or .rich_media_content (legacy).
        content_elem = soup.find('div', {'id': 'js_content'})
        if not content_elem:
            content_elem = soup.find('div', {'class': 'rich_media_content'})

        if not content_elem:
            raise Exception("未找到文章内容")

        # Strip scripts/boilerplate and fix lazy-loaded images in place.
        self._clean_content(content_elem)

        # Plain-text version of the body.
        text_content = content_elem.get_text().strip()

        # Serialized HTML version of the body.
        html_content = str(content_elem)

        return {
            'title': title,
            'author': author,
            'publish_time': publish_time,
            'url': url,
            'text_content': text_content,
            'html_content': html_content,
            'soup_content': content_elem
        }

    def _clean_content(self, content_elem):
        """Clean the article body in place, removing unwanted elements."""
        # Remove scripts and inline styles.
        for script in content_elem(["script", "style"]):
            script.decompose()

        # Remove WeChat-specific boilerplate sections (powered-by/copyright).
        for elem in content_elem.find_all(['section'], {'class': re.compile(r'.*powered.*|.*copyright.*')}):
            elem.decompose()

        # WeChat lazy-loads images via data-src; promote it to src so the
        # saved HTML/Markdown shows the images.
        for img in content_elem.find_all('img'):
            if img.get('data-src'):
                img['src'] = img['data-src']

    def _sanitize_filename(self, title):
        """Return *title* as a safe filename stem.

        Removes characters that are illegal in filenames (CJK characters
        are preserved) and truncates to ``_MAX_FILENAME_LEN`` characters.
        """
        safe_title = re.sub(self._INVALID_FILENAME_CHARS, '', title).strip()
        return safe_title[:self._MAX_FILENAME_LEN]

    def save_as_markdown(self, article_info, output_dir="output"):
        """Save the article as a Markdown file.

        :param article_info: dict from :meth:`extract_article_content`
        :param output_dir: directory the file is written into
        :return: path of the written file, or None on failure
        """
        if not article_info:
            return None

        os.makedirs(output_dir, exist_ok=True)

        filename = f"{self._sanitize_filename(article_info['title'])}.md"
        filepath = os.path.join(output_dir, filename)

        # Convert HTML to Markdown.
        try:
            # markdownify preserves structure better than html2text.
            markdown_content = md(
                article_info['html_content'],
                heading_style="ATX",  # '#'-style headings
                bullets="-",  # '-' as the list marker
                strip=['script', 'style']  # drop scripts and styles
            )
        except Exception as e:
            print(f"Markdown转换失败，使用备用方法: {e}")
            # Fallback converter: html2text (module-level import).
            h = html2text.HTML2Text()
            h.ignore_links = False
            h.ignore_images = False
            h.body_width = 0  # no line wrapping
            markdown_content = h.handle(article_info['html_content'])

        # Prepend article metadata as a Markdown header block.
        metadata = f"""# {article_info['title']}

**作者**: {article_info['author']}  
**发布时间**: {article_info['publish_time']}  
**原文链接**: {article_info['url']}

---

"""

        full_content = metadata + markdown_content

        # Always write with explicit UTF-8 encoding.
        try:
            with open(filepath, 'w', encoding='utf-8', newline='') as f:
                f.write(full_content)
            print(f"Markdown文件已保存: {filepath}")
            return filepath
        except Exception as e:
            print(f"保存Markdown文件失败: {e}")
            return None

    def save_as_html(self, article_info, output_dir="output"):
        """Save the article as a standalone styled HTML document.

        :param article_info: dict from :meth:`extract_article_content`
        :param output_dir: directory the file is written into
        :return: path of the written file, or None on failure
        """
        if not article_info:
            return None

        os.makedirs(output_dir, exist_ok=True)

        filename = f"{self._sanitize_filename(article_info['title'])}.html"
        filepath = os.path.join(output_dir, filename)

        # Wrap the extracted body in a complete HTML document.
        html_template = f"""<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>{article_info['title']}</title>
    <style>
        body {{
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'PingFang SC', 'Hiragino Sans GB', 'Microsoft YaHei', 'Helvetica Neue', Helvetica, Arial, sans-serif;
            line-height: 1.6;
            color: #333;
            max-width: 800px;
            margin: 0 auto;
            padding: 20px;
        }}
        .article-header {{
            border-bottom: 1px solid #eee;
            padding-bottom: 20px;
            margin-bottom: 30px;
        }}
        .article-title {{
            font-size: 28px;
            font-weight: bold;
            margin-bottom: 10px;
        }}
        .article-meta {{
            color: #666;
            font-size: 14px;
        }}
        .article-content {{
            font-size: 16px;
        }}
        .article-content img {{
            max-width: 100%;
            height: auto;
        }}
    </style>
</head>
<body>
    <div class="article-header">
        <h1 class="article-title">{article_info['title']}</h1>
        <div class="article-meta">
            <p><strong>作者</strong>: {article_info['author']}</p>
            <p><strong>发布时间</strong>: {article_info['publish_time']}</p>
            <p><strong>原文链接</strong>: <a href="{article_info['url']}" target="_blank">{article_info['url']}</a></p>
        </div>
    </div>
    <div class="article-content">
        {article_info['html_content']}
    </div>
</body>
</html>"""

        # Always write with explicit UTF-8 encoding.
        try:
            with open(filepath, 'w', encoding='utf-8', newline='') as f:
                f.write(html_template)
            print(f"HTML文件已保存: {filepath}")
            return filepath
        except Exception as e:
            print(f"保存HTML文件失败: {e}")
            return None

    def crawl_and_save(self, url, format_type="both", output_dir="output"):
        """
        Crawl an article and save it to disk.
        :param url: WeChat article URL
        :param format_type: output format ("markdown", "html", "both")
        :param output_dir: output directory
        :return: list of saved file paths (empty on failure)
        """
        print(f"开始爬取文章: {url}")

        # Reject URLs that are not hosted on weixin.qq.com.
        if not self._is_valid_wechat_url(url):
            print("错误: 不是有效的微信公众号文章链接")
            return []

        article_info = self.extract_article_content(url)
        if not article_info:
            print("提取文章内容失败")
            return []

        print(f"文章标题: {article_info['title']}")
        print(f"作者: {article_info['author']}")

        saved_files = []

        if format_type in ["markdown", "both"]:
            md_file = self.save_as_markdown(article_info, output_dir)
            if md_file:
                saved_files.append(md_file)

        if format_type in ["html", "both"]:
            html_file = self.save_as_html(article_info, output_dir)
            if html_file:
                saved_files.append(html_file)

        return saved_files

    def _is_valid_wechat_url(self, url):
        """Check whether *url* is hosted on a WeChat article domain.

        Only the parsed hostname is inspected (not the whole URL), so
        unrelated links that merely mention weixin.qq.com in a path or
        query string are rejected.
        """
        host = urlparse(url).netloc.lower().split(':')[0]
        return host == 'weixin.qq.com' or host.endswith('.weixin.qq.com')

    def close(self):
        """Release resources (quit the Selenium driver if running)."""
        if self.driver:
            self.driver.quit()
            self.driver = None  # guard against a second quit() call


def main():
    """Interactive entry point.

    Prompts for an article URL, an output format and whether to use
    Selenium, then runs the crawler and reports the saved files.
    """
    print("微信公众号文章爬虫")
    print("=" * 50)

    # Article URL is mandatory.
    url = input("请输入微信公众号文章链接: ").strip()
    if not url:
        print("错误: 请提供有效的文章链接")
        return

    # Output format menu; anything unrecognized defaults to "both".
    print("\n选择输出格式:")
    print("1. Markdown")
    print("2. HTML")
    print("3. 两种格式都保存")

    choice = input("请选择 (1-3): ").strip()
    format_type = {"1": "markdown", "2": "html", "3": "both"}.get(choice, "both")

    # Optional Selenium rendering for JS-heavy pages.
    answer = input("\n是否使用Selenium处理JavaScript渲染? (y/n): ").strip().lower()
    crawler = WeChatArticleCrawler(use_selenium=(answer == 'y'))

    try:
        saved_files = crawler.crawl_and_save(url, format_type)
        if saved_files:
            print(f"\n成功保存 {len(saved_files)} 个文件:")
            for file_path in saved_files:
                print(f"  - {file_path}")
        else:
            print("\n没有文件被保存")
    except KeyboardInterrupt:
        print("\n用户中断操作")
    except Exception as e:
        print(f"\n发生错误: {e}")
    finally:
        # Always release the Selenium driver, even on error/interrupt.
        crawler.close()


# Run the interactive CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()