from selenium import webdriver
from selenium.webdriver.edge.service import Service
from selenium.webdriver.edge.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import requests
import time
import os
import json
from datetime import datetime
from PIL import Image, ImageDraw
from docx import Document
from docx.shared import Inches, Pt
import re

def format_create_time(create_time_str):
    """Normalize a create_time string to 'YYYY-MM-DDTHH:MM:SS'.

    Drops fractional seconds and any trailing timezone suffix
    (e.g. '2024-01-02T03:04:05.123+0800' -> '2024-01-02T03:04:05').

    Args:
        create_time_str: Timestamp string as delivered by the site/API.

    Returns:
        The normalized timestamp, or the input unchanged if it cannot
        be parsed.
    """
    try:
        # The first 19 characters are exactly 'YYYY-MM-DDTHH:MM:SS'; slicing
        # also removes '.ffffff' fractions, '+0800'/'-0500' offsets and a
        # trailing 'Z' in one step.
        dt = datetime.strptime(create_time_str[:19], '%Y-%m-%dT%H:%M:%S')
        return dt.strftime('%Y-%m-%dT%H:%M:%S')
    except (ValueError, TypeError) as e:
        # TypeError covers non-string input (e.g. None); mirror the
        # original best-effort contract and hand the value back untouched.
        print(f"时间格式转换失败: {e}")
        return create_time_str

def download_and_process_avatar(avatar_url, author, avatar_dir):
    """Download an author's avatar and crop it into a circle.

    Args:
        avatar_url: URL of the avatar image.
        author: Author name; sanitized and used as the PNG file name.
        avatar_dir: Directory the processed avatar is written to.

    Returns:
        Path of the saved circular PNG, or None on any failure.
    """
    # Strip characters that are unsafe in file names.
    avatar_filename = f"{author}.png".replace(" ", "_").replace("/", "_")
    avatar_path = os.path.join(avatar_dir, avatar_filename)
    
    try:
        # Fail fast on HTTP errors; otherwise an error page would be saved
        # to disk and PIL would later fail on it with a confusing message.
        response = requests.get(avatar_url, timeout=10)
        response.raise_for_status()
        with open(avatar_path, 'wb') as avatar_file:
            avatar_file.write(response.content)
        
        avatar_image = Image.open(avatar_path).convert("RGBA")
        avatar_image = avatar_image.resize((300, 300), Image.LANCZOS)
        
        # Build a circular alpha mask the same size as the avatar.
        mask = Image.new('L', (300, 300), 0)
        draw = ImageDraw.Draw(mask)
        draw.ellipse((0, 0, 300, 300), fill=255)
        
        # Apply the mask so pixels outside the circle become transparent.
        avatar_image.putalpha(mask)
        avatar_image.save(avatar_path)
        
        print(f"圆形头像 {avatar_filename} 已成功保存。")
        return avatar_path
    except Exception as e:
        print(f"处理头像 {avatar_filename} 时出错: {e}")
        return None

def download_image(image_url, image_id, image_dir):
    """Download one content image to `<image_dir>/<image_id>.jpg`.

    Args:
        image_url: URL of the image.
        image_id: Identifier used as the file name stem.
        image_dir: Target directory.

    Returns:
        Path of the saved file, or None on any failure.
    """
    image_filename = f"{image_id}.jpg"
    image_path = os.path.join(image_dir, image_filename)
    
    try:
        # Check the HTTP status so a 404/500 error body is never written
        # to disk as if it were image data.
        response = requests.get(image_url, timeout=10)
        response.raise_for_status()
        with open(image_path, 'wb') as img_file:
            img_file.write(response.content)
        print(f"图片 {image_filename} 已成功下载。")
        return image_path
    except Exception as e:
        print(f"下载图片 {image_filename} 时出错: {e}")
        return None

def _first_element(element, selectors):
    """Return the first descendant of `element` matching any CSS selector
    in `selectors`, or None if none of them matches."""
    for selector in selectors:
        try:
            return element.find_element(By.CSS_SELECTOR, selector)
        except Exception:
            continue
    return None


def extract_topic_from_element(element):
    """Extract topic fields from a DOM element (the app-footprint-topic
    structure observed in the Selenium IDE recording).

    Args:
        element: Selenium WebElement wrapping one topic.

    Returns:
        dict that may contain 'topic_id', 'author', 'avatar_url',
        'create_time', 'text' and 'images' (a list of URLs). A key is
        simply absent when that field could not be extracted.
    """
    topic_data = {}
    try:
        # Topic id: taken from any link pointing at /footprint/<digits>.
        # NOTE: `except Exception` (not bare `except`) everywhere below so
        # KeyboardInterrupt/SystemExit can still stop the scraper.
        try:
            for link in element.find_elements(By.CSS_SELECTOR, "a[href*='footprint']"):
                href = link.get_attribute('href')
                if href:
                    topic_id_match = re.search(r'/footprint/(\d+)', href)
                    if topic_id_match:
                        topic_data['topic_id'] = topic_id_match.group(1)
                        break
        except Exception:
            pass
        
        # Author display name.
        try:
            author_elem = _first_element(element, [
                ".user-name", ".author-name", "[class*='name']",
                "span[class*='nickname']", ".nickname",
            ])
            if author_elem is not None:
                topic_data['author'] = author_elem.text.strip()
        except Exception:
            pass
        
        # Avatar image URL.
        try:
            avatar_elem = _first_element(element, [
                "img[class*='avatar']", ".avatar img", "img[class*='user']",
            ])
            if avatar_elem is not None:
                topic_data['avatar_url'] = avatar_elem.get_attribute('src')
        except Exception:
            pass
        
        # Creation date: prefer a machine-readable datetime attribute,
        # fall back to the element's visible text.
        try:
            date_elem = _first_element(element, [
                ".time", ".date", "[class*='time']", "time",
                "[data-time]", "[datetime]",
            ])
            if date_elem is not None:
                topic_data['create_time'] = date_elem.get_attribute('datetime') or date_elem.text.strip()
        except Exception:
            pass
        
        # Post body text (".content" matches the Selenium IDE recording).
        try:
            content_elem = _first_element(element, [
                ".content",  # selector from the Selenium IDE recording
                "app-talk-content .content",
                ".text", "[class*='content']",
                "[class*='text']", ".post-content", "article",
            ])
            if content_elem is not None:
                topic_data['text'] = content_elem.text.strip()
        except Exception:
            pass
        
        # Attached images: the first selector that yields any image wins.
        try:
            image_selectors = [
                ".image-container img",
                ".image-container .item",
                "app-image-gallery img",
                "img[class*='image']", 
                "img[class*='photo']", 
                ".content img"
            ]
            topic_data['images'] = []
            for selector in image_selectors:
                try:
                    for img in element.find_elements(By.CSS_SELECTOR, selector):
                        src = img.get_attribute('src') or img.get_attribute('data-src')
                        # Skip avatars/logos/icons that share <img> markup.
                        if src and 'avatar' not in src.lower() and 'logo' not in src.lower() and 'icon' not in src.lower():
                            if 'thumbnail' in src:
                                # Strip the thumbnail path segment to get the
                                # original full-size image URL.
                                original_src = src.replace('/thumbnail/', '/').replace('thumbnail/', '')
                                topic_data['images'].append(original_src)
                            else:
                                topic_data['images'].append(src)
                    if topic_data['images']:
                        break
                except Exception:
                    continue
        except Exception:
            pass
        
    except Exception as e:
        print(f"提取文章数据时出错: {e}")
    
    return topic_data

def get_all_topics_from_page(driver, footprint_url, target_count=3970):
    """Collect every topic on the page (no author filtering).

    Repeatedly scrolls the page downward (never backward), extracting all
    topic elements visible on each pass, until `target_count` items are
    collected, the scroll budget is spent, or the page repeatedly stops
    growing.

    Args:
        driver: Selenium WebDriver, already authenticated.
        footprint_url: Absolute URL, or a path relative to
            https://wx.zsxq.com.
        target_count: Stop once this many topics have been collected.

    Returns:
        List of topic dicts shaped like {'topic_id', 'create_time',
        'talk': {'text', 'owner', 'images'}}; possibly shorter than
        `target_count`, and possibly empty on early failure.
    """
    all_topics = []
    
    try:
        # Navigate to the target page; prepend the site base for relative paths.
        print("正在访问目标页面...")
        if footprint_url.startswith('/'):
            base_url = "https://wx.zsxq.com"
            full_url = base_url + footprint_url
        else:
            full_url = footprint_url
        driver.get(full_url)
        time.sleep(5)  # wait for the initial page load
        
        # Fix the window size (matches the Selenium IDE recording).
        try:
            driver.set_window_size(1234, 1020)
            print("窗口大小已设置为 1234x1020")
        except:
            pass
        
        # Wait for the page to finish loading.
        try:
            WebDriverWait(driver, 20).until(
                EC.presence_of_element_located((By.TAG_NAME, "body"))
            )
            # Wait for content elements (try several possible selectors).
            try:
                WebDriverWait(driver, 20).until(
                    EC.presence_of_element_located((By.CSS_SELECTOR, "app-footprint-topic"))
                )
            except:
                # app-footprint-topic absent: fall back to looser selectors.
                try:
                    WebDriverWait(driver, 10).until(
                        EC.presence_of_element_located((By.CSS_SELECTOR, "[class*='topic'], [class*='footprint'], [class*='item']"))
                    )
                except:
                    pass
            print("页面加载完成")
        except:
            print("页面加载超时，继续尝试...")
        
        print(f"开始采集页面中的所有内容（目标: {target_count} 条）...")
        print("=" * 60)
        
        collected_topic_ids = set()  # ids already collected, for dedup
        scroll_count = 0
        max_scrolls = 1000  # large scroll budget to support big collections
        no_new_content_count = 0
        max_no_new_content = 10  # tolerated consecutive no-growth scrolls
        
        # Start from the very top so the first pass sees the first topics.
        driver.execute_script("window.scrollTo(0, 0);")
        time.sleep(0.5)
        last_scroll_position = driver.execute_script("return window.pageYOffset || document.documentElement.scrollTop || 0")
        
        # Load more content by scrolling until done or the target is reached.
        while scroll_count < max_scrolls and len(all_topics) < target_count:
            # Snapshot the current page height and scroll offset.
            last_height = driver.execute_script("return document.body.scrollHeight")
            current_scroll = driver.execute_script("return window.pageYOffset || document.documentElement.scrollTop || 0")
            
            # Extract every topic element currently present in the DOM.
            try:
                # Try several selectors until one yields elements.
                topic_elements = []
                selectors = [
                    "app-footprint-topic",
                    "[class*='footprint-topic']",
                    "[class*='topic-item']",
                    "[class*='post-item']",
                    "[class*='content-item']"
                ]
                
                for selector in selectors:
                    try:
                        elements = driver.find_elements(By.CSS_SELECTOR, selector)
                        if elements:
                            topic_elements = elements
                            print(f"  使用选择器 '{selector}' 找到 {len(elements)} 个元素")
                            break
                    except:
                        continue
                
                if topic_elements:
                    print(f"  当前页面找到 {len(topic_elements)} 个内容元素，已采集 {len(all_topics)}/{target_count} 条")
                    
                    # Extract each topic's data. Deliberately does NOT scroll
                    # to each element, to avoid scrolling back up the page.
                    for idx, element in enumerate(topic_elements, 1):
                        try:
                            # Extract the topic fields straight from the DOM.
                            topic_data = extract_topic_from_element(element)
                            
                            # Get the topic id if the extractor found one.
                            topic_id = topic_data.get('topic_id', '')
                            
                            # Fallback: dig the id out of a /footprint/<id> link.
                            if not topic_id:
                                try:
                                    links = element.find_elements(By.CSS_SELECTOR, "a[href*='footprint']")
                                    for link in links:
                                        href = link.get_attribute('href')
                                        if href:
                                            topic_id_match = re.search(r'/footprint/(\d+)', href)
                                            if topic_id_match:
                                                topic_id = topic_id_match.group(1)
                                                break
                                except:
                                    pass
                            
                            # Last resort: synthesize a temporary unique id.
                            if not topic_id:
                                topic_id = f"temp_{len(all_topics)}_{idx}"
                            
                            # Only add topics not collected before.
                            if topic_id not in collected_topic_ids:
                                # Build the full topic structure (zsxq API shape).
                                author_name = topic_data.get('author', '未知作者')
                                full_topic = {
                                    'topic_id': topic_id,
                                    'create_time': topic_data.get('create_time', ''),
                                    'talk': {
                                        'text': topic_data.get('text', ''),
                                        'owner': {
                                            'name': author_name,
                                            'user_id': '',
                                            'avatar_url': topic_data.get('avatar_url', '')
                                        },
                                        'images': [{'url': url} for url in topic_data.get('images', [])]
                                    }
                                }
                                
                                # NOTE(review): redundant — talk.text was already
                                # filled from topic_data['text'] above, so this
                                # branch can never change it.
                                if not full_topic['talk']['text']:
                                    if 'text' in topic_data:
                                        full_topic['talk']['text'] = topic_data['text']
                                
                                all_topics.append(full_topic)
                                collected_topic_ids.add(topic_id)
                                
                                if len(all_topics) % 50 == 0 or len(all_topics) == target_count:
                                    print(f"  ✓ 已采集 {len(all_topics)}/{target_count} 条内容")
                                elif len(all_topics) <= 10 or len(all_topics) % 10 == 0:
                                    print(f"  ✓ 采集到内容 #{len(all_topics)}: {topic_id[:12] if len(topic_id) > 12 else topic_id}...")
                                
                                no_new_content_count = 0
                                
                                # Stop extracting once the target is reached.
                                if len(all_topics) >= target_count:
                                    print(f"  ✓ 已达到目标数量 {target_count} 条，停止采集")
                                    break
                        except Exception as e:
                            continue
                else:
                    print("  未找到内容元素")
                
            except Exception as e:
                print(f"提取内容时出错: {e}")
            
            # Leave the scroll loop too once the target is reached.
            if len(all_topics) >= target_count:
                break
            
            # Scroll the page (downward only, never backward).
            print(f"  滚动页面 ({scroll_count + 1}/{max_scrolls})，当前已采集 {len(all_topics)}/{target_count} 条...")
            
            # Re-read the current scroll offset and total page height.
            current_scroll = driver.execute_script("return window.pageYOffset || document.documentElement.scrollTop || 0")
            page_height = driver.execute_script("return document.body.scrollHeight || document.documentElement.scrollHeight")
            
            # Distance remaining to the bottom of the page.
            distance_to_bottom = page_height - current_scroll
            
            # Far from the bottom (>1000 px): scroll incrementally;
            # otherwise jump straight to the bottom.
            if distance_to_bottom > 1000:
                # Incremental scroll: move down a fixed step, never backward.
                scroll_increment = 800  # pixels per scroll step
                target_scroll = min(current_scroll + scroll_increment, page_height - 100)
                
                # Only scroll if the target is below the current position.
                if target_scroll > current_scroll:
                    driver.execute_script(f"window.scrollTo(0, {target_scroll});")
                    last_scroll_position = target_scroll
                else:
                    # Already near the bottom: jump straight there.
                    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                    last_scroll_position = driver.execute_script("return window.pageYOffset || document.documentElement.scrollTop || 0")
            else:
                # Near the bottom: jump straight there.
                driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                last_scroll_position = driver.execute_script("return window.pageYOffset || document.documentElement.scrollTop || 0")
            
            time.sleep(2)  # let newly loaded content render
            
            # Click a "load more" button if one is visible.
            try:
                # NOTE(review): ':contains(...)' is jQuery syntax, not valid
                # CSS — the last two selectors presumably always fail and are
                # skipped; confirm before relying on them.
                load_more_selectors = [
                    "button[class*='load']", "button[class*='more']",
                    ".load-more", ".more-btn", "[class*='load-more']",
                    "a[class*='load']", "a[class*='more']",
                    "button:contains('加载更多')", "a:contains('加载更多')"
                ]
                for selector in load_more_selectors:
                    try:
                        load_more_btn = driver.find_element(By.CSS_SELECTOR, selector)
                        if load_more_btn.is_displayed():
                            driver.execute_script("arguments[0].click();", load_more_btn)
                            print("  点击了'加载更多'按钮")
                            time.sleep(3)
                            break
                    except:
                        continue
            except:
                pass
            
            # Check whether the page grew, and verify the scroll offset did
            # not move backward.
            new_height = driver.execute_script("return document.body.scrollHeight || document.documentElement.scrollHeight")
            new_scroll_position = driver.execute_script("return window.pageYOffset || document.documentElement.scrollTop || 0")
            
            # If the offset regressed, force a jump back to the bottom.
            if new_scroll_position < last_scroll_position:
                print(f"  警告：检测到滚动位置回撤（{new_scroll_position} < {last_scroll_position}），重新滚动到底部...")
                driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                time.sleep(1)
                new_scroll_position = driver.execute_script("return window.pageYOffset || document.documentElement.scrollTop || 0")
            
            last_scroll_position = new_scroll_position
            
            if new_height == last_height:
                no_new_content_count += 1
                print(f"  页面高度未变化 (连续: {no_new_content_count}/{max_no_new_content})，当前滚动位置: {int(new_scroll_position)}")
                
                if no_new_content_count >= max_no_new_content:
                    print("已连续多次无新内容，可能已加载完毕")
                    # Keep trying as long as we are under 90% of the target.
                    if len(all_topics) < target_count * 0.9:
                        print("但未达到目标数量，继续尝试...")
                        no_new_content_count = 0
                    else:
                        break
            else:
                no_new_content_count = 0
                print(f"  页面高度变化: {last_height} -> {new_height}，继续滚动...")
            
            scroll_count += 1
        
        print("=" * 60)
        print(f"✓ 采集完成！共采集到 {len(all_topics)} 条内容（目标: {target_count} 条）")
        return all_topics
        
    except Exception as e:
        print(f"获取所有内容时出错: {e}")
        import traceback
        traceback.print_exc()
    
    return all_topics

def get_all_topics_by_author(driver, footprint_url):
    """Collect all articles by the author of the given footprint page.

    Mirrors a Selenium IDE recording: open the page, read the first
    article to learn the author's name, then repeatedly scroll (clicking
    any "load more" button) and keep only topics whose author matches.

    Args:
        driver: Selenium WebDriver, already authenticated.
        footprint_url: Absolute URL, or a path relative to
            https://wx.zsxq.com.

    Returns:
        List of topic dicts (same shape as get_all_topics_from_page);
        empty list if the first article or its author cannot be read.
    """
    all_topics = []
    
    try:
        # Mirrors the Selenium IDE "open" command: visit the target page.
        print("正在访问目标页面...")
        # Prepend the site base URL when given a relative path.
        if footprint_url.startswith('/'):
            base_url = "https://wx.zsxq.com"
            full_url = base_url + footprint_url
        else:
            full_url = footprint_url
        driver.get(full_url)
        time.sleep(5)  # wait for the initial page load
        
        # Mirrors the IDE "setWindowSize" command: 1234x1020.
        try:
            driver.set_window_size(1234, 1020)
            print("窗口大小已设置为 1234x1020")
        except:
            pass
        
        # Wait for the page and the Angular app to finish rendering.
        try:
            WebDriverWait(driver, 20).until(
                EC.presence_of_element_located((By.TAG_NAME, "body"))
            )
            # Angular is ready once app-footprint-topic elements appear.
            WebDriverWait(driver, 20).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, "app-footprint-topic"))
            )
            print("页面加载完成")
        except:
            print("页面加载超时，继续尝试...")
        
        # Read the first article to discover the author's identity.
        print("正在提取第一篇文章信息...")
        first_topic_data = extract_footprint_data(driver, full_url)
        
        if not first_topic_data:
            print("无法提取第一篇文章信息")
            return all_topics
        
        # Pull the author name/id out of either API-shaped or flat data.
        author_name = None
        author_id = None
        if 'talk' in first_topic_data and 'owner' in first_topic_data['talk']:
            owner = first_topic_data['talk']['owner']
            author_name = owner.get('name', '')
            author_id = owner.get('user_id', '')
        elif 'author' in first_topic_data:
            author_name = first_topic_data['author']
        
        if not author_name:
            print("无法获取作者信息")
            return all_topics
        
        print(f"✓ 获取到作者: {author_name}")
        
        print("开始采集该作者的所有文章...")
        print("=" * 60)
        
        collected_topic_ids = set()  # ids already collected, for dedup
        scroll_count = 0
        max_scrolls = 200  # scroll budget
        no_new_content_count = 0
        max_no_new_content = 5  # tolerated consecutive no-growth scrolls
        
        # Seed the result list with the first article.
        if first_topic_data:
            topic_id = first_topic_data.get('topic_id', '')
            if topic_id:
                collected_topic_ids.add(topic_id)
            all_topics.append(first_topic_data)
            print(f"  ✓ 采集到文章 #1")
        
        # Load more content by scrolling (mirrors the IDE "runScript" command).
        while scroll_count < max_scrolls:
            # Snapshot the current page height and scroll offset.
            last_height = driver.execute_script("return document.body.scrollHeight")
            current_scroll = driver.execute_script("return window.pageYOffset || document.documentElement.scrollTop")
            
            # Extract all article elements currently in the DOM
            # (app-footprint-topic, per the Selenium IDE recording).
            try:
                # The Angular app renders each article as app-footprint-topic.
                topic_elements = driver.find_elements(By.CSS_SELECTOR, "app-footprint-topic")
                
                if topic_elements:
                    print(f"  找到 {len(topic_elements)} 个文章元素")
                    
                    # Extract each article's data.
                    for idx, element in enumerate(topic_elements, 1):
                        try:
                            # Mirror the IDE recording: click the article body
                            # (.content) to trigger loading of its details.
                            try:
                                content_elem = element.find_element(By.CSS_SELECTOR, ".content")
                                # Bring the element into view first.
                                driver.execute_script("arguments[0].scrollIntoView({behavior: 'smooth', block: 'center'});", content_elem)
                                time.sleep(0.5)
                                
                                # Click it via JS (mirrors the IDE "click").
                                driver.execute_script("arguments[0].click();", content_elem)
                                time.sleep(1)  # wait for details to load
                            except:
                                pass
                            
                            # Extract the article's fields from the element.
                            topic_data = extract_topic_from_element(element)
                            
                            # Check whether this article belongs to the author.
                            topic_author = topic_data.get('author', '')
                            topic_id = topic_data.get('topic_id', '')
                            
                            # Matching author and not collected yet: keep it.
                            if topic_author == author_name and topic_id and topic_id not in collected_topic_ids:
                                # Build the full topic structure (zsxq API shape).
                                full_topic = {
                                    'topic_id': topic_id,
                                    'create_time': topic_data.get('create_time', ''),
                                    'talk': {
                                        'text': topic_data.get('text', ''),
                                        'owner': {
                                            'name': author_name,
                                            'user_id': author_id or '',
                                            'avatar_url': topic_data.get('avatar_url', '')
                                        },
                                        'images': [{'url': url} for url in topic_data.get('images', [])]
                                    }
                                }
                                all_topics.append(full_topic)
                                collected_topic_ids.add(topic_id)
                                print(f"  ✓ 采集到文章 #{len(all_topics)}: {topic_id[:8] if topic_id else '未知'}... (元素 #{idx})")
                                no_new_content_count = 0
                            elif not topic_id:
                                # No id extracted: try to recover it from links.
                                try:
                                    # Look for links pointing at /footprint/<id>.
                                    links = element.find_elements(By.CSS_SELECTOR, "a[href*='footprint']")
                                    for link in links:
                                        href = link.get_attribute('href')
                                        if href:
                                            topic_id_match = re.search(r'/footprint/(\d+)', href)
                                            if topic_id_match:
                                                topic_id = topic_id_match.group(1)
                                                if topic_id not in collected_topic_ids:
                                                    # Open the article in a new tab to
                                                    # extract its full details.
                                                    # NOTE(review): articles found this way
                                                    # are appended without an author check —
                                                    # confirm that is intended.
                                                    try:
                                                        driver.execute_script("window.open(arguments[0], '_blank');", href)
                                                        time.sleep(2)
                                                        # Switch to the new tab.
                                                        driver.switch_to.window(driver.window_handles[-1])
                                                        # Extract the data there.
                                                        topic_data = extract_footprint_data(driver, href)
                                                        if topic_data:
                                                            all_topics.append(topic_data)
                                                            collected_topic_ids.add(topic_id)
                                                            print(f"  ✓ 采集到文章 #{len(all_topics)}: {topic_id[:8]}...")
                                                        # Close the tab and return to the
                                                        # original window.
                                                        driver.close()
                                                        driver.switch_to.window(driver.window_handles[0])
                                                        time.sleep(1)
                                                    except:
                                                        # On failure, make sure we are back
                                                        # on the original window.
                                                        if len(driver.window_handles) > 1:
                                                            driver.close()
                                                            driver.switch_to.window(driver.window_handles[0])
                                                break
                                except:
                                    pass
                        except Exception as e:
                            continue
                else:
                    print("  未找到文章元素")
                
            except Exception as e:
                print(f"提取文章时出错: {e}")
            
            # Mirrors the IDE "runScript" command: scroll the page.
            # Jump straight to the bottom of the document.
            print(f"  滚动页面 ({scroll_count + 1}/{max_scrolls})...")
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)  # give the Angular app time to render new content
            
            # Click a "load more" button if one is visible.
            try:
                load_more_selectors = [
                    "button[class*='load']", "button[class*='more']",
                    ".load-more", ".more-btn", "[class*='load-more']",
                    "a[class*='load']", "a[class*='more']"
                ]
                for selector in load_more_selectors:
                    try:
                        load_more_btn = driver.find_element(By.CSS_SELECTOR, selector)
                        if load_more_btn.is_displayed():
                            driver.execute_script("arguments[0].click();", load_more_btn)
                            print("  点击了'加载更多'按钮")
                            time.sleep(3)
                            break
                    except:
                        continue
            except:
                pass
            
            # Check whether the page height changed.
            new_height = driver.execute_script("return document.body.scrollHeight")
            if new_height == last_height:
                no_new_content_count += 1
                print(f"  页面高度未变化 (连续: {no_new_content_count}/{max_no_new_content})")
                
                if no_new_content_count >= max_no_new_content:
                    print("已连续多次无新内容，可能已加载完毕")
                    break
            else:
                no_new_content_count = 0
            
            scroll_count += 1
        
        print("=" * 60)
        print(f"✓ 采集完成！共采集到 {len(all_topics)} 篇文章")
        return all_topics
        
    except Exception as e:
        print(f"获取所有文章时出错: {e}")
        import traceback
        traceback.print_exc()
    
    return all_topics

def extract_footprint_data(driver, footprint_url):
    """Extract topic data from a zsxq footprint page.

    Tries three strategies, in decreasing order of reliability:
      1. Call the zsxq topic API directly, reusing the browser's cookies.
      2. Parse JSON state embedded in the rendered page source.
      3. Scrape individual fields from DOM elements (best-effort fallback).

    Args:
        driver: Logged-in Selenium WebDriver instance.
        footprint_url: Footprint page URL; expected to contain the numeric
            topic id as ``/footprint/<id>``.

    Returns:
        dict: Topic data (possibly partial); empty dict if nothing could
        be extracted.
    """
    print(f"正在访问页面: {footprint_url}")
    driver.get(footprint_url)
    
    # Give the Angular app time to render.
    time.sleep(5)
    
    # Wait for a key element; on timeout fall through and try anyway.
    # NOTE: narrowed from a bare `except:` so Ctrl-C still interrupts.
    try:
        WebDriverWait(driver, 20).until(
            EC.presence_of_element_located((By.TAG_NAME, "body"))
        )
    except Exception:
        print("页面加载超时，继续尝试提取数据...")
    
    # Scroll to the bottom to trigger lazy-loaded content.
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(2)
    
    footprint_data = {}
    
    try:
        # Method 1: call the API (most reliable).
        print("方法1: 尝试通过 API 获取数据...")
        topic_id_match = re.search(r'/footprint/(\d+)', footprint_url)
        if topic_id_match:
            topic_id = topic_id_match.group(1)
            # Reuse the authenticated browser session's cookies.
            cookies = driver.get_cookies()
            cookie_string = "; ".join([f"{cookie['name']}={cookie['value']}" for cookie in cookies])
            
            api_url = f"https://api.zsxq.com/v2/topics/{topic_id}"
            headers = {
                'Cookie': cookie_string,
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
                'Referer': footprint_url
            }
            try:
                response = requests.get(api_url, headers=headers, timeout=10)
                if response.status_code == 200:
                    api_data = response.json()
                    if 'resp_data' in api_data and 'topic' in api_data['resp_data']:
                        footprint_data = api_data['resp_data']['topic']
                        print("✓ 成功通过 API 获取数据")
                        return footprint_data
                    else:
                        print(f"API 返回数据格式异常: {api_data}")
                else:
                    print(f"API 请求失败，状态码: {response.status_code}")
            except Exception as e:
                print(f"API 请求异常: {e}")
        
        # Method 2: look for JSON state embedded in the page source.
        print("方法2: 尝试从页面中提取 JSON 数据...")
        page_source = driver.page_source
        
        # Known framework/state-container patterns, plus generic inline
        # scripts that mention "topic"/"talk".
        json_patterns = [
            r'window\.__INITIAL_STATE__\s*=\s*({.*?});',
            r'window\.__PRELOADED_STATE__\s*=\s*({.*?});',
            r'<script[^>]*>.*?({.*?"topic".*?}).*?</script>',
            r'<script[^>]*>.*?({.*?"talk".*?}).*?</script>',
        ]
        
        for pattern in json_patterns:
            match = re.search(pattern, page_source, re.DOTALL)
            if match:
                json_str = match.group(1)
                try:
                    initial_state = json.loads(json_str)
                except json.JSONDecodeError:
                    # Matched text isn't valid JSON; try the next pattern.
                    continue
                print("✓ 成功提取页面 JSON 数据")
                # Find the topic payload wherever this state nests it.
                if 'topic' in initial_state:
                    footprint_data = initial_state['topic']
                    return footprint_data
                elif 'resp_data' in initial_state and 'topic' in initial_state['resp_data']:
                    footprint_data = initial_state['resp_data']['topic']
                    return footprint_data
                elif 'footprint' in initial_state:
                    footprint_data = initial_state['footprint']
                    return footprint_data
        
        # Method 3: scrape individual DOM elements (fallback).
        print("方法3: 尝试从 DOM 元素提取数据...")
        
        # Author name.
        try:
            selectors = [
                "div[class*='user'] span[class*='name']",
                ".user-name",
                ".author-name",
                "[data-author]",
                "span[class*='nickname']"
            ]
            for selector in selectors:
                elements = driver.find_elements(By.CSS_SELECTOR, selector)
                if elements:
                    footprint_data['author'] = elements[0].text.strip()
                    break
        except Exception:
            pass
        
        # Avatar image URL.
        try:
            selectors = [
                "img[class*='avatar']",
                "img[class*='user']",
                ".avatar img",
                "[data-avatar]"
            ]
            for selector in selectors:
                elements = driver.find_elements(By.CSS_SELECTOR, selector)
                if elements:
                    src = elements[0].get_attribute('src')
                    if src:
                        footprint_data['avatar_url'] = src
                        break
        except Exception:
            pass
        
        # Publish date: prefer visible text, fall back to datetime attr.
        try:
            selectors = [
                ".time",
                ".date",
                "[class*='time']",
                "[data-time]",
                "time"
            ]
            for selector in selectors:
                elements = driver.find_elements(By.CSS_SELECTOR, selector)
                if elements:
                    footprint_data['create_time'] = elements[0].text.strip() or elements[0].get_attribute('datetime')
                    if footprint_data['create_time']:
                        break
        except Exception:
            pass
        
        # Body text.
        try:
            selectors = [
                ".content",
                ".text",
                "[class*='content']",
                "[class*='text']",
                "article",
                ".post-content"
            ]
            for selector in selectors:
                elements = driver.find_elements(By.CSS_SELECTOR, selector)
                if elements:
                    footprint_data['text'] = elements[0].text.strip()
                    break
        except Exception:
            pass
        
        # Content images, excluding avatars/logos/icons by URL keyword.
        try:
            image_elements = driver.find_elements(By.CSS_SELECTOR, "img[class*='image'], img[class*='photo'], .content img, .post-content img")
            footprint_data['images'] = []
            for img in image_elements:
                src = img.get_attribute('src') or img.get_attribute('data-src')
                if src and 'avatar' not in src.lower() and 'logo' not in src.lower() and 'icon' not in src.lower():
                    footprint_data['images'].append(src)
        except Exception:
            pass
        
        if footprint_data:
            print("✓ 成功从 DOM 元素提取部分数据")
        
    except Exception as e:
        print(f"提取数据时出错: {e}")
        import traceback
        traceback.print_exc()
    
    return footprint_data

def create_word_document(all_topics, avatar_path, author, output_dir, image_dir):
    """Build a Word document from the collected topics.

    The author heading and the (circular) avatar appear once at the top;
    every topic then contributes its publish time, text content, and
    downloaded images, separated by a divider line.

    Args:
        all_topics: List of topic dicts, either in the API format (fields
            nested under ``talk``) or the flattened DOM-scraped format.
        avatar_path: Path to the processed avatar image, or None.
        author: Display name used in the heading and the output filename.
        output_dir: Directory the .docx file is written to.
        image_dir: Directory downloaded topic images are stored in.

    Returns:
        str: Path of the saved .docx file.
    """
    doc = Document()
    
    # Document title.
    title = doc.add_heading('知识星球 Footprint 详情', 0)
    
    # Author heading and avatar appear only once, at the top.
    author_heading = doc.add_heading(f'作者: {author}', level=1)
    
    if avatar_path and os.path.exists(avatar_path):
        try:
            doc.add_picture(avatar_path, width=Inches(1.5))
            doc.add_paragraph()  # blank line
        except Exception as e:
            print(f"添加头像到 Word 时出错: {e}")
    
    doc.add_paragraph()  # blank separator line
    
    for idx, topic in enumerate(all_topics, 1):
        print(f"正在处理第 {idx}/{len(all_topics)} 篇文章...")
        
        # Divider between articles.
        if idx > 1:
            doc.add_paragraph('=' * 50)
            doc.add_paragraph()  # blank line
        
        # Publish time, normalized when parseable.
        create_time = topic.get('create_time', '未知时间')
        if create_time and create_time != '未知时间':
            try:
                create_time = format_create_time(create_time)
            except Exception:
                pass
        
        date_para = doc.add_paragraph(f'发布时间: {create_time}')
        date_para.runs[0].font.size = Pt(12)
        date_para.runs[0].bold = True
        doc.add_paragraph()  # blank line
        
        # Body text: API topics nest it under 'talk'; scraped ones are flat.
        text = ''
        if 'talk' in topic:
            text = topic['talk'].get('text', '')
        elif 'text' in topic:
            text = topic['text']
        
        if text:
            doc.add_heading('内容', level=2)
            # One paragraph per non-empty line to preserve line breaks.
            for line in text.split('\n'):
                if line.strip():
                    content_para = doc.add_paragraph(line.strip())
                    content_para.runs[0].font.size = Pt(11)
            doc.add_paragraph()  # blank line
        
        # Images: same dual-format handling as the text.
        images = []
        if 'talk' in topic and 'images' in topic['talk']:
            images = topic['talk']['images']
        elif 'images' in topic:
            images = topic['images']
        
        if images:
            doc.add_heading('配图', level=2)
            # enumerate() gives every image a unique, stable id — the old
            # images.index(image) collided on duplicate URLs (same filename,
            # so later downloads overwrote earlier ones) and was O(n^2).
            for img_idx, image in enumerate(images):
                if isinstance(image, dict):
                    # Prefer the largest available size.
                    large_url = image.get('large', {}).get('url', '') or \
                               image.get('medium', {}).get('url', '') or \
                               image.get('small', {}).get('url', '') or \
                               image.get('url', '')
                    image_id = image.get('image_id', '')
                elif isinstance(image, str):
                    large_url = image
                    image_id = f"img_{idx}_{img_idx}"
                else:
                    continue
                
                if large_url:
                    img_path = download_image(large_url, image_id, image_dir)
                    if img_path and os.path.exists(img_path):
                        try:
                            # Read the pixel size (and close the file handle)
                            # to cap display width at 5", keeping the image's
                            # own width when smaller (assuming 96 DPI).
                            with Image.open(img_path) as img:
                                width, height = img.size
                            max_width = Inches(5)
                            if width > 0:
                                display_width = min(max_width, Inches(width / 96))
                            else:
                                display_width = max_width
                            
                            doc.add_picture(img_path, width=display_width)
                            doc.add_paragraph()  # blank line
                        except Exception as e:
                            print(f"添加图片到 Word 时出错: {e}")
        
        doc.add_paragraph()  # blank line after each article
    
    # Sanitize the author for the filename and stamp it with count + time.
    safe_author = author.replace(" ", "_").replace("/", "_").replace("\\", "_")
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    filename = f"{safe_author}_所有内容_{len(all_topics)}条_{timestamp}.docx"
    filepath = os.path.join(output_dir, filename)
    doc.save(filepath)
    print(f"Word 文档已保存: {filepath}")
    return filepath

def main():
    """Entry point: log in via WeChat QR, collect every topic from the
    footprint page, and export them all to a single Word document."""
    footprint_url = "https://wx.zsxq.com/footprint/48828551452428"
    target_count = 3970  # how many topics we aim to collect

    # Prepare the output directory tree.
    output_dir = "footprint_output"
    image_dir = os.path.join(output_dir, "images")
    avatar_dir = os.path.join(output_dir, "avatars")
    for directory in (image_dir, avatar_dir):
        os.makedirs(directory, exist_ok=True)

    # Launch Edge, preferring a driver bundled next to this script and
    # falling back to whatever is on PATH.
    print("正在启动浏览器...")
    driver_path = os.path.join(os.path.dirname(__file__), "edgedriver_win64", "msedgedriver.exe")
    if os.path.exists(driver_path):
        driver = webdriver.Edge(service=Service(driver_path))
        print(f"使用本地驱动: {driver_path}")
    else:
        driver = webdriver.Edge()
        print("使用系统 PATH 中的 Edge 驱动")

    try:
        # Manual WeChat QR-code login comes first.
        driver.get('https://wx.zsxq.com/')
        print("请使用微信扫码登录知识星球...")
        input("登录完成后，按 Enter 键继续...")

        # Collect everything from the target page.
        print(f"开始采集页面中的所有内容（目标: {target_count} 条）...")
        all_topics = get_all_topics_from_page(driver, footprint_url, target_count=target_count)

        if not all_topics:
            print("无法获取内容数据，程序终止。")
            return

        print(f"✓ 成功获取 {len(all_topics)} 条内容，开始处理...")

        # Pull the author name and avatar for the document header from
        # the first topic (API format first, flat scraped format second).
        author = '知识星球内容'
        avatar_path = None

        if all_topics:
            first_topic = all_topics[0]
            avatar_url = ''
            if 'talk' in first_topic and 'owner' in first_topic['talk']:
                owner = first_topic['talk']['owner']
                author = owner.get('name', '知识星球内容')
                avatar_url = owner.get('avatar_url', '')
            elif 'author' in first_topic:
                author = first_topic.get('author', '知识星球内容')
                avatar_url = first_topic.get('avatar_url', '')
            if avatar_url:
                avatar_path = download_and_process_avatar(avatar_url, author, avatar_dir)

        # Write everything into one Word document.
        create_word_document(all_topics, avatar_path, author, output_dir, image_dir)

        print("所有操作完成！")

    except Exception as e:
        print(f"程序执行出错: {e}")
        import traceback
        traceback.print_exc()
    finally:
        input("按 Enter 键关闭浏览器...")
        driver.quit()

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()

