import argparse
import json
import os
import re
import time

import requests
from bs4 import BeautifulSoup
from docx import Document
from docx.oxml.ns import qn
from docx.shared import Pt, RGBColor

class ZhihuCollectionToWord:
    """Fetch items from a Zhihu collection and save each one as a Word (.docx) file."""

    def __init__(self, cookies=None, save_folder=None):
        """
        Initialize the exporter.
        :param cookies: cookie dict; when None, cookies are read from config.json
        :param save_folder: output folder path; when None, a default desktop folder is used
        """
        self.desktop_path = os.path.join(os.path.expanduser("~"), "Desktop")
        self.save_folder = save_folder or os.path.join(self.desktop_path, "知乎收藏-Word版")
        os.makedirs(self.save_folder, exist_ok=True)
        
        # Cookies passed as an argument take precedence over the config file.
        if cookies:
            self.cookies = cookies
        else:
            self.cookies = self.load_cookies_from_config()
        
        # Browser-like headers; the x-* fields mirror what Zhihu's web client sends.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Referer': 'https://www.zhihu.com/',
            'x-api-version': '3.0',
            'x-requested-with': 'fetch',
            'x-zse-93': '101_3_3.0'
        }

    def load_cookies_from_config(self):
        """Load the 'cookies' mapping from config.json; create a template config if missing.

        :return: cookie dict, or {} when the config is missing or unreadable
        """
        config_file = os.path.join(os.path.dirname(__file__), "config.json")
        
        if os.path.exists(config_file):
            try:
                with open(config_file, 'r', encoding='utf-8') as f:
                    config = json.load(f)
                    return config.get('cookies', {})
            except Exception as e:
                print(f"读取配置文件失败: {e}")
                return {}
        else:
            # Write a template so the user knows exactly which fields to fill in.
            self.create_default_config(config_file)
            print(f"\n⚠️ 配置文件不存在，已创建默认配置文件: {config_file}")
            print("请编辑配置文件，填入你的知乎Cookie信息后重新运行程序\n")
            return {}
    
    def create_default_config(self, config_file):
        """Write a placeholder config.json listing every field the user must fill in.

        :param config_file: destination path for the template config
        """
        default_config = {
            "cookies": {
                "SESSIONID": "你的SESSIONID",
                "JOID": "你的JOID",
                "osd": "你的osd",
                "_xsrf": "你的_xsrf",
                "_zap": "你的_zap",
                "d_c0": "你的d_c0",
                "captcha_session_v2": "你的captcha_session_v2",
                "z_c0": "你的z_c0"
            },
            "collection_id": "你的收藏夹ID",
            "limit": 20,
            "comment": "请填入你的知乎Cookie信息。获取方法：登录知乎网页版，F12打开开发者工具，刷新页面，在Network标签中找到任意请求，复制Cookie值"
        }
        try:
            with open(config_file, 'w', encoding='utf-8') as f:
                json.dump(default_config, f, ensure_ascii=False, indent=4)
        except Exception as e:
            print(f"创建配置文件失败: {e}")

    def get_collection_items(self, collection_id, limit=20, offset=0):
        """
        Fetch items from a collection via Zhihu's v4 API.
        :param collection_id: collection id (the number in the collection URL)
        :param limit: number of items to fetch
        :param offset: index of the first item to fetch
        :return: list of processed items, or None on any request failure
        """
        api_url = f"https://www.zhihu.com/api/v4/collections/{collection_id}/items"
        params = {'offset': offset, 'limit': limit}
        
        try:
            print("正在获取收藏内容...")
            response = requests.get(api_url, headers=self.headers, cookies=self.cookies, params=params, timeout=10)
            
            if response.status_code == 200:
                data = response.json()
                return self.process_items(data.get('data', []))
            else:
                print(f"请求失败，状态码: {response.status_code}")
                return None
        except Exception as e:
            print(f"获取收藏时出错: {e}")
            return None

    def process_items(self, items):
        """Convert raw API items (answers and articles) into title/content dicts.

        :param items: list of raw item dicts from the collections API
        :return: list of {'title', 'content', 'index'} dicts; failures are skipped
        """
        collections = []
        for i, item in enumerate(items):
            try:
                content = item.get('content', {})
                if content.get('type') == 'answer':
                    title = content.get('question', {}).get('title', f'收藏{i+1}')
                    answer_content = content.get('content', '')
                elif content.get('type') == 'article':
                    title = content.get('title', f'收藏{i+1}')
                    answer_content = content.get('content', '')
                else:
                    # Unsupported item types (videos, pins, ...) are skipped.
                    continue
                
                # Strip HTML tags and rebuild readable paragraphs.
                soup = BeautifulSoup(answer_content, 'html.parser')
                clean_content = self.extract_and_format_html(soup)
                
                collections.append({
                    'title': title,
                    'content': clean_content,
                    'index': i+1
                })
                
                print(f"成功提取第 {i+1} 个收藏: {title[:30]}...")
                time.sleep(1)  # pace the processing to stay polite to the server
                
            except Exception as e:
                print(f"处理第 {i+1} 个收藏时出错: {e}")
                continue
        
        return collections

    def extract_and_format_html(self, soup):
        """Extract paragraph-structured plain text from parsed answer/article HTML.

        :param soup: BeautifulSoup tree (may be None)
        :return: formatted text with paragraphs separated by blank lines
        """
        if not soup:
            return ""
        
        paragraphs = []
        # Track emitted elements by id(): bs4 Tag equality is value-based, so a
        # plain set of tags could wrongly drop a second, identical-looking
        # paragraph elsewhere in the document.
        seen_ids = set()
        
        # Walk paragraph-level elements in priority order (headings first).
        for tag_name in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'blockquote', 'li', 'pre', 'code']:
            elements = soup.find_all(tag_name)
            for element in elements:
                # Skip an element when it, an ancestor, or a descendant was already
                # emitted (e.g. <code> inside an already-handled <pre>, or an <li>
                # whose inner <p> was emitted) — otherwise the text is duplicated.
                if (id(element) in seen_ids
                        or any(id(a) in seen_ids for a in element.parents)
                        or any(id(d) in seen_ids for d in element.descendants)):
                    continue
                
                text = element.get_text(separator=' ', strip=True)
                if not text or len(text) <= 1:
                    continue
                
                # Decorate the text according to the source tag.
                if tag_name in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
                    paragraphs.append(f"【{text}】")
                elif tag_name == 'blockquote':
                    paragraphs.append(f"「{text}」")
                elif tag_name == 'li':
                    paragraphs.append(f"• {text}")
                elif tag_name in ['pre', 'code']:
                    # Preserve code formatting inside a fenced block.
                    paragraphs.append(f"```\n{text}\n```")
                else:
                    paragraphs.append(text)
                
                seen_ids.add(id(element))
        
        # Fallback: no paragraph-level tags found — take all text and split on newlines.
        if not paragraphs:
            text = soup.get_text(separator=' ', strip=True)
            if text:
                lines = text.split('\n')
                for line in lines:
                    line = line.strip()
                    if line and len(line) > 1:
                        paragraphs.append(line)
        
        if not paragraphs:
            return ""
        
        content = '\n\n'.join(paragraphs)
        
        # Final whitespace/punctuation cleanup.
        content = self.format_content(content)
        
        return content

    def format_content(self, text):
        """Normalize whitespace and punctuation in extracted text for readability.

        :param text: raw paragraph-joined text
        :return: cleaned text; "" for empty input
        """
        if not text:
            return ""
        
        # 1. Collapse runs of spaces/tabs into a single space.
        text = re.sub(r'[ \t]+', ' ', text)
        
        # 2. Strip leading/trailing spaces on every line.
        lines = text.split('\n')
        lines = [line.strip() for line in lines]
        text = '\n'.join(lines)
        
        # 3. Drop blank lines (paragraphs are re-joined with \n\n).
        lines = [line for line in lines if line]
        text = '\n\n'.join(lines)
        
        # 4. Remove meaningless punctuation at the start of a paragraph.
        text = re.sub(r'\n\n([，。、,.])', r'\n\n', text)
        text = re.sub(r'^([，。、,.])', '', text)
        
        # 5. Collapse repeated punctuation (e.g. multiple commas/periods).
        text = re.sub(r'([，。！？,.])\1+', r'\1', text)
        
        # 6. Drop code fences that ended up empty.
        text = re.sub(r'```\s*\n\s*```', '', text)
        
        # 7. Final pass: never more than one consecutive blank line.
        text = re.sub(r'\n{3,}', '\n\n', text)
        
        return text.strip()

    def save_to_word(self, collections):
        """Save each collected item as its own Word document.

        :param collections: list of {'title', 'content', 'index'} dicts
        :return: number of documents successfully saved
        """
        if not collections:
            print("没有内容需要保存")
            return 0
            
        saved_count = 0
        for item in collections:
            try:
                doc = Document()
                
                # Use a CJK-capable font so Chinese text renders correctly.
                self.set_chinese_font(doc)
                
                # Title heading, centered.
                title_paragraph = doc.add_heading(f'第{item["index"]}篇：{item["title"]}', level=1)
                title_paragraph.alignment = 1  # WD_ALIGN_PARAGRAPH.CENTER
                
                # Horizontal rule between title and body.
                doc.add_paragraph('=' * 60)
                
                content_paragraph = doc.add_paragraph()
                
                # One Word paragraph per blank-line-separated text paragraph.
                paragraphs = item['content'].split('\n\n')
                for i, para in enumerate(paragraphs):
                    if para.strip():  # skip empty paragraphs
                        if i > 0:
                            content_paragraph = doc.add_paragraph()
                        
                        run = content_paragraph.add_run(para.strip())
                        # Pt(10) replaces the former raw int (200000), which was not
                        # a valid python-docx Length and did not render as 10 pt.
                        run.font.size = Pt(10)
                        run.font.color.rgb = RGBColor(0, 0, 0)  # black
                
                # Footer with the save timestamp, right-aligned.
                doc.add_paragraph()
                footer = doc.add_paragraph(f'保存时间: {time.strftime("%Y-%m-%d %H:%M:%S")}')
                footer.alignment = 2  # WD_ALIGN_PARAGRAPH.RIGHT
                
                # Build a filesystem-safe filename from the title.
                safe_title = re.sub(r'[\\/*?:"<>|]', '', item['title'])[:40].strip()
                if not safe_title:
                    safe_title = f"收藏_{item['index']}"
                
                filename = f"{item['index']:02d}_{safe_title}.docx"
                filepath = os.path.join(self.save_folder, filename)
                doc.save(filepath)
                
                # Report the actual filename (previously printed a hard-coded placeholder).
                print(f"已保存Word文档: {filename}")
                saved_count += 1
                
            except Exception as e:
                print(f"保存Word文档 {item['title']} 时出错: {e}")
                continue
        
        return saved_count

    def set_chinese_font(self, doc):
        """Best-effort: set the document's default font to 微软雅黑 for CJK text."""
        try:
            doc.styles['Normal'].font.name = '微软雅黑'
            doc.styles['Normal']._element.rPr.rFonts.set(qn('w:eastAsia'), '微软雅黑')
        except Exception:
            pass  # fall back to the default font if styling fails

def load_config():
    """Read config.json next to this script; return {} when missing or unreadable."""
    path = os.path.join(os.path.dirname(__file__), "config.json")
    if not os.path.exists(path):
        return {}
    try:
        with open(path, 'r', encoding='utf-8') as handle:
            return json.load(handle)
    except Exception as err:
        print(f"读取配置文件失败: {err}")
        return {}

def main():
    """CLI entry point: merge command-line args with config.json, then export."""
    print("=== 知乎收藏转Word文档 ===\n")

    parser = argparse.ArgumentParser(description='知乎收藏转Word文档工具')
    # Table-driven option registration keeps the flag definitions in one place.
    option_table = (
        (('-c', '--collection-id'), dict(type=str, help='收藏夹ID')),
        (('-l', '--limit'), dict(type=int, help='要保存的收藏数量')),
        (('-o', '--offset'), dict(type=int, default=0, help='偏移量（从第几个开始）')),
        (('-f', '--folder'), dict(type=str, help='保存文件夹路径')),
        (('--config',), dict(type=str, help='配置文件路径（默认为config.json）')),
    )
    for flags, options in option_table:
        parser.add_argument(*flags, **options)
    args = parser.parse_args()

    file_config = load_config()

    # Command-line values take precedence over the config file.
    target_id = args.collection_id or file_config.get('collection_id')
    fetch_count = args.limit or file_config.get('limit', 20)

    # Guard: a real collection id is mandatory.
    if not target_id or target_id == "你的收藏夹ID":
        print("❌ 错误: 未设置收藏夹ID")
        print("\n使用方法:")
        print("  1. 通过命令行参数: python ZhihuCollectionExporter.py -c 你的收藏夹ID -l 20")
        print("  2. 在config.json配置文件中设置collection_id字段")
        print("\n提示: 收藏夹ID可以在收藏夹URL中找到，格式如: https://www.zhihu.com/collection/839821155")
        return

    exporter = ZhihuCollectionToWord(save_folder=args.folder)

    # Guard: cookies must exist and not be the untouched template placeholders.
    if not exporter.cookies or next(iter(exporter.cookies.values())).startswith("你的"):
        print("❌ 错误: Cookie未配置或配置不正确")
        print("请编辑config.json文件，填入正确的Cookie信息")
        return

    print(f"📁 保存路径: {exporter.save_folder}")
    print(f"📋 收藏夹ID: {target_id}")
    print(f"📊 获取数量: {fetch_count}")
    print(f"🔢 偏移量: {args.offset}\n")

    fetched = exporter.get_collection_items(target_id, limit=fetch_count, offset=args.offset)
    if not fetched:
        print("\n❌ 未能获取到收藏内容，请检查:")
        print("  1. Cookie是否正确且未过期")
        print("  2. 收藏夹ID是否正确")
        print("  3. 网络连接是否正常")
        return

    total_saved = exporter.save_to_word(fetched)
    print(f"\n=== ✅ 完成 ===")
    print(f"成功保存 {total_saved} 个Word文档")
    print(f"保存路径: {exporter.save_folder}")

if __name__ == "__main__":
    main()
